| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 – 54.5k chars) | int64 (0 – 371) | string (87 – 49.2k chars) | int64 (0 – 349) | int64 (0 – 1) |
import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    """
    Wraps a DeepSpeed config (dict, JSON file path, or base64-encoded string) and exposes
    convenience queries such as the ZeRO stage and offload status.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before the model is created
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    """Thin wrapper around a deepspeed engine that forwards the backward/step calls."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """Optimizer wrapper whose `step` and `zero_grad` are no-ops, since DeepSpeed performs them."""

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped (e.g. because of gradient overflow)."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """Scheduler wrapper whose `step` is a no-op, since DeepSpeed drives the lr schedule."""

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Placeholder optimizer: holds the arguments so the real optimizer can be built from the DeepSpeed config."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Placeholder scheduler: holds the arguments so the real scheduler can be built from the DeepSpeed config."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
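A minimal usage sketch of the config wrapper above, assuming the restored class name `HfDeepSpeedConfig`; the config dict here is illustrative only, not taken from the original:

```python
# Hypothetical DeepSpeed config; keys follow the DeepSpeed JSON schema.
ds_config = {
    "zero_optimization": {
        "stage": 3,
        "offload_param": {"device": "cpu"},
    }
}

hf_ds_config = HfDeepSpeedConfig(ds_config)
print(hf_ds_config.get_value("zero_optimization.stage"))  # 3
print(hf_ds_config.is_zero3())    # True
print(hf_ds_config.is_offload())  # True: "cpu" is a valid offload device
```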
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """
    Create a simple DataLoader to use during the test cases
    """
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """
    A helper function for verifying the batch sizes coming from a prepared dataloader in each process
    """
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
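A concrete reading of the expectations above: with two processes, dataset size 7, and batch size 2, `even_batches=True` pads the 7 samples up to 8 so each process sees batches of sizes [2, 2], while `even_batches=False` splits the 7 samples without padding, giving process 0 batches of [2, 2] and process 1 batches of [2, 1].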
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
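In standard notation, the quantity the function computes is

$$
H(z) = H_0 \sqrt{\Omega_{\mathrm{rad}}(1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda}, \qquad \Omega_k = 1 - \left(\Omega_m + \Omega_{\mathrm{rad}} + \Omega_\Lambda\right).
$$

The demo call returns $H(0) = H_0 = 68.3$, since at $z = 0$ the four density terms sum to one by construction.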
import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
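A note on the resolution used in the multiple-of-8 test: 760 = 8 × 95 and 504 = 8 × 63, and because 95 and 63 are odd, neither dimension is divisible by 16 or 32, which is exactly the edge case the test exercises.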
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22Pipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
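The guidance arithmetic inside the denoising loop is standard classifier-free guidance: the U-Net runs on a doubled batch (unconditional and conditional halves) and the two noise predictions are blended with the guidance scale $s$:

$$
\hat{\epsilon} = \epsilon_{\varnothing} + s\,\bigl(\epsilon_{\mathrm{cond}} - \epsilon_{\varnothing}\bigr).
$$

With $s = 1$ this reduces to the conditional prediction, which is why the pipeline only doubles the batch when `guidance_scale > 1.0`.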
import inspect
import unittest

from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import DecisionTransformerModel
    from transformers.models.decision_transformer.modeling_decision_transformer import (
        DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)


@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
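A note on the loop above: after each simulated environment step, the return-to-go conditioning is decremented by the observed reward, $\hat{R}_{t+1} = \hat{R}_t - r_t$ (the `pred_return` line), which is how a Decision Transformer keeps its action predictions conditioned on the *remaining* desired return.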
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
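The table filled by `matrix_chain_order` follows the classic recurrence, where matrix $A_i$ has dimensions $p_{i-1} \times p_i$ for the dimension array $p$:

$$
m[i][j] = \begin{cases} 0 & i = j \\ \min_{i \le k < j}\bigl(m[i][k] + m[k+1][j] + p_{i-1}\,p_k\,p_j\bigr) & i < j \end{cases}
$$

For the demo array [30, 35, 15, 5, 10, 20, 25] (six matrices), the minimum cost `matrix[1][n - 1]` works out to 15125 scalar multiplications.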
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
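This is the standard `transformers` lazy-import pattern: at import time only the `_import_structure` mapping is registered, and `_LazyModule` resolves a symbol to its submodule on first attribute access, keeping `import transformers` fast; the `TYPE_CHECKING` branch gives static analyzers the real imports. A sketch of the effect, assuming the package layout above:

```python
# Nothing under models/mra is imported yet...
import transformers

# ...until a symbol is first touched, which triggers the lazy load of modeling_mra.
model_cls = transformers.models.mra.MraModel
```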
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)

        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size

        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]

        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
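The lookup table built in `stretch` is the usual histogram-equalization transform: with $p_r(k) = n_k / N$ the empirical intensity distribution, each gray level $k$ is remapped to

$$
s_k = (L - 1)\sum_{i=0}^{k} p_r(i), \qquad L = 256,
$$

rounded to the nearest integer, which is what the running `self.sk` accumulator and the `last_list` table implement.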
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    Improved Pseudo numerical methods for diffusion models (iPNDM).
    """

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
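The branching on `len(self.ets)` in `step` is a linear multistep bootstrap: it applies the order-1 through order-4 Adams–Bashforth formulas as history accumulates, with the fourth-order rule

$$
\bar{\epsilon}_t = \tfrac{1}{24}\bigl(55\,\epsilon_t - 59\,\epsilon_{t-1} + 37\,\epsilon_{t-2} - 9\,\epsilon_{t-3}\bigr)
$$

used once four past predictions are available, matching `pndm_order = 4`.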
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 297
|
import sys
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
lowercase_ = len(__lowerCamelCase )
lowercase_ = [[0 for x in range(__lowerCamelCase )] for x in range(__lowerCamelCase )]
lowercase_ = [[0 for x in range(__lowerCamelCase )] for x in range(__lowerCamelCase )]
for chain_length in range(2 , __lowerCamelCase ):
for a in range(1 , n - chain_length + 1 ):
lowercase_ = a + chain_length - 1
lowercase_ = sys.maxsize
for c in range(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowercase_ = cost
lowercase_ = c
return matrix, sol
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict ):
'''simple docstring'''
if i == j:
print("A" + str(__lowerCamelCase ) , end=" " )
else:
print("(" , end=" " )
print_optiomal_solution(__lowerCamelCase , __lowerCamelCase , optimal_solution[i][j] )
print_optiomal_solution(__lowerCamelCase , optimal_solution[i][j] + 1 , __lowerCamelCase )
print(")" , end=" " )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = [30, 35, 15, 5, 10, 20, 25]
lowercase_ = len(__lowerCamelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
lowercase_ , lowercase_ = matrix_chain_order(__lowerCamelCase )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(__lowerCamelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
SCREAMING_SNAKE_CASE__ = 0B1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
SCREAMING_SNAKE_CASE__ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = WATERMARK_BITS
lowercase_ = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark )
def A__ ( self , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if images.shape[-1] < 256:
return images
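        # The pipeline hands over [-1, 1] NCHW tensors; convert them to 0-255
        # NHWC arrays for the dwtDct encoder, then map the watermarked frames
        # back to [-1, 1] NCHW below.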
lowercase_ = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ = [self.encoder.encode(UpperCAmelCase , "dwtDct" ) for image in images]
lowercase_ = torch.from_numpy(np.array(UpperCAmelCase ) ).permute(0 , 3 , 1 , 2 )
lowercase_ = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float ):
'''simple docstring'''
return 10 - x * x
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float , __lowerCamelCase: float ):
'''simple docstring'''
if equation(__lowerCamelCase ) * equation(__lowerCamelCase ) >= 0:
raise ValueError("Wrong space!" )
lowercase_ = a
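    # Standard bisection invariant: equation(a) and equation(b) have opposite
    # signs, so a root lies inside [a, b]; halve the interval until its width
    # drops below 0.01.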
while (b - a) >= 0.01:
# Find middle point
lowercase_ = (a + b) / 2
# Check if middle point is root
if equation(__lowerCamelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(__lowerCamelCase ) * equation(__lowerCamelCase ) < 0:
lowercase_ = c
else:
lowercase_ = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
    Example 2-The same as Example 1, but also returning the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> int:
'''simple docstring'''
if return_pvalue:
lowercase_ = pearsonr(UpperCAmelCase , UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCAmelCase , UpperCAmelCase )[0] )}
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any ):
'''simple docstring'''
with open(__lowerCamelCase , "r" ) as f:
lowercase_ = f.read().splitlines()
return [l.strip() for l in lines]
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="<eos>" , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = load_vocab_file(UpperCAmelCase )
lowercase_ = dict(enumerate(self.all_tokens ) )
lowercase_ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase_ = unk_token
lowercase_ = cls_token
lowercase_ = pad_token
lowercase_ = mask_token
lowercase_ = eos_token
lowercase_ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return text.split()
def A__ ( self , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
return len(self._id_to_token )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.cls_token_id]
lowercase_ = [self.eos_token_id] # No sep token in ESM vocabulary
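        # Resulting layout: <cls> seq <eos> for a single sequence and
        # <cls> a <eos> b <eos> for a pair, with <eos> doubling as the separator.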
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase_ = [1] + ([0] * len(UpperCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(UpperCAmelCase ) + [1]
return mask
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = os.path.join(UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(UpperCAmelCase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> int:
'''simple docstring'''
return super()._add_tokens(UpperCAmelCase , special_tokens=UpperCAmelCase )
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: torch.nn.Module , __lowerCamelCase: BnbQuantizationConfig , __lowerCamelCase: Union[str, os.PathLike] = None , __lowerCamelCase: Optional[Dict[str, Union[int, str, torch.device]]] = None , __lowerCamelCase: Optional[List[str]] = None , __lowerCamelCase: Optional[Dict[Union[int, str], Union[int, str]]] = None , __lowerCamelCase: Optional[Union[str, os.PathLike]] = None , __lowerCamelCase: bool = False , ):
'''simple docstring'''
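    # High-level flow, as reconstructed from the code below: validate the
    # bitsandbytes install, collect the modules to keep in full precision,
    # swap nn.Linear layers for bitsandbytes ones, then either quantize the
    # already-loaded model in place or dispatch it from a checkpoint with an
    # inferred device map.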
lowercase_ = bnb_quantization_config.load_in_abit
lowercase_ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
lowercase_ = []
# custom device map
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(device_map.keys() ) > 1:
lowercase_ = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowercase_ = get_keys_to_not_convert(__lowerCamelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__lowerCamelCase )
lowercase_ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowercase_ = []
lowercase_ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__lowerCamelCase )
# compatibility with peft
lowercase_ = load_in_abit
lowercase_ = load_in_abit
lowercase_ = get_parameter_device(__lowerCamelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
lowercase_ = replace_with_bnb_layers(__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase )
# convert param to the right dtype
lowercase_ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowercase_ = name.replace(".weight" , "" ).replace(".bias" , "" )
lowercase_ = getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__lowerCamelCase ):
param.to(__lowerCamelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
F'The model device type is {model_device.type}. However, cuda is needed for quantization.'
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
lowercase_ = replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase )
lowercase_ = get_quantized_model_device_map(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_memory=__lowerCamelCase , no_split_module_classes=__lowerCamelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowercase_ = True
lowercase_ = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowerCamelCase , offload_state_dict=__lowerCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__lowerCamelCase , device_map=__lowerCamelCase , offload_dir=__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: str=None ):
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
lowercase_ = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
lowercase_ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowercase_ = {}
lowercase_ = special_dtypes
lowercase_ = no_split_module_classes
lowercase_ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowercase_ = get_balanced_memory(
__lowerCamelCase , low_zero=(device_map == "balanced_low_0") , max_memory=__lowerCamelCase , **__lowerCamelCase , )
lowercase_ = max_memory
lowercase_ = infer_auto_device_map(__lowerCamelCase , **__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
# check if don't have any quantized module on the cpu
lowercase_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowercase_ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
del device_map_without_some_modules
return device_map
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Dict=None , __lowerCamelCase: int=None ):
'''simple docstring'''
if modules_to_not_convert is None:
lowercase_ = []
lowercase_ , lowercase_ = _replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: Dict=None , ):
'''simple docstring'''
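    # Recursively walk the module tree: every nn.Linear whose dotted name is
    # not covered by `modules_to_not_convert` is swapped for a bitsandbytes
    # 8-bit/4-bit linear layer.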
lowercase_ = False
for name, module in model.named_children():
if current_key_name is None:
lowercase_ = []
current_key_name.append(__lowerCamelCase )
if isinstance(__lowerCamelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowercase_ = ".".join(__lowerCamelCase )
lowercase_ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowercase_ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowercase_ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__lowerCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowercase_ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
lowercase_ = module.weight.data
if module.bias is not None:
lowercase_ = module.bias.data
bnb_module.requires_grad_(__lowerCamelCase )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowercase_ = True
if len(list(module.children() ) ) > 0:
lowercase_ , lowercase_ = _replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowercase_ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
'''simple docstring'''
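    # Heuristic intent: keep the output head and any weights tied to the
    # embeddings in full precision; bare base models simply return [].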
with init_empty_weights():
        lowercase_ = deepcopy(__lowerCamelCase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
lowercase_ = find_tied_parameters(__lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase_ = sum(__lowerCamelCase , [] )
lowercase_ = len(__lowerCamelCase ) > 0
# Check if it is a base model
lowercase_ = False
if hasattr(__lowerCamelCase , "base_model_prefix" ):
lowercase_ = not hasattr(__lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase_ = list(model.named_children() )
lowercase_ = [list_modules[-1][0]]
# add last module together with tied weights
lowercase_ = set(__lowerCamelCase ) - set(__lowerCamelCase )
lowercase_ = list(set(__lowerCamelCase ) ) + list(__lowerCamelCase )
# remove ".weight" from the keys
lowercase_ = [".weight", ".bias"]
lowercase_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase_ = name.replace(__lowerCamelCase , "" )
filtered_module_names.append(__lowerCamelCase )
return filtered_module_names
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] ):
'''simple docstring'''
for m in model.modules():
if isinstance(__lowerCamelCase , bnb.nn.Linearabit ):
return True
return False
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: nn.Module ):
'''simple docstring'''
return next(parameter.parameters() ).device
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[str] ):
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , 0 , dtype=__lowerCamelCase , value=__lowerCamelCase )
lowercase_ = param_name
lowercase_ = model
if "." in tensor_name:
lowercase_ = tensor_name.split("." )
for split in splits[:-1]:
lowercase_ = getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
lowercase_ = new_module
lowercase_ = splits[-1]
# offload weights
lowercase_ = False
offload_weight(module._parameters[tensor_name] , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
if hasattr(module._parameters[tensor_name] , "SCB" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , __lowerCamelCase , index=__lowerCamelCase , )
else:
offload_weight(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
offload_weight(__lowerCamelCase , param_name.replace("weight" , "SCB" ) , __lowerCamelCase , index=__lowerCamelCase )
set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , "meta" , dtype=__lowerCamelCase , value=torch.empty(*param.size() ) )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: list[list[int]] ):
'''simple docstring'''
def update_area_of_max_square(__lowerCamelCase: int , __lowerCamelCase: int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase_ = update_area_of_max_square(__lowerCamelCase , col + 1 )
lowercase_ = update_area_of_max_square(row + 1 , col + 1 )
lowercase_ = update_area_of_max_square(row + 1 , __lowerCamelCase )
if mat[row][col]:
lowercase_ = 1 + min([right, diagonal, down] )
lowercase_ = max(largest_square_area[0] , __lowerCamelCase )
return sub_problem_sol
else:
return 0
lowercase_ = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: list[list[int]] ):
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
__lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase_ = update_area_of_max_square_using_dp_array(__lowerCamelCase , col + 1 , __lowerCamelCase )
lowercase_ = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , __lowerCamelCase )
lowercase_ = update_area_of_max_square_using_dp_array(row + 1 , __lowerCamelCase , __lowerCamelCase )
if mat[row][col]:
lowercase_ = 1 + min([right, diagonal, down] )
lowercase_ = max(largest_square_area[0] , __lowerCamelCase )
lowercase_ = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase_ = [0]
lowercase_ = [[-1] * cols for _ in range(__lowerCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , __lowerCamelCase )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: list[list[int]] ):
'''simple docstring'''
lowercase_ = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase_ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ = dp_array[row][col + 1]
lowercase_ = dp_array[row + 1][col + 1]
lowercase_ = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase_ = 1 + min(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowercase_ = max(dp_array[row][col] , __lowerCamelCase )
else:
lowercase_ = 0
return largest_square_area
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: list[list[int]] ):
'''simple docstring'''
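    # Space-optimized bottom-up DP: only the current and next rows are kept,
    # cutting memory from O(rows * cols) to O(cols) while the recurrence from
    # the full-table version above stays the same.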
lowercase_ = [0] * (cols + 1)
lowercase_ = [0] * (cols + 1)
lowercase_ = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ = current_row[col + 1]
lowercase_ = next_row[col + 1]
lowercase_ = next_row[col]
if mat[row][col] == 1:
lowercase_ = 1 + min(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowercase_ = max(current_row[col] , __lowerCamelCase )
else:
lowercase_ = 0
lowercase_ = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(UpperCAmelCase )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(UpperCAmelCase ):
self.assertDictEqual(UpperCAmelCase , example_records[i] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
lowercase_ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def A__ ( self ) -> Any: # checks what happens with missing columns
'''simple docstring'''
lowercase_ = [{"col_1": 1}, {"col_2": "x"}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def A__ ( self ) -> List[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
lowercase_ = [{"col_1": []}, {"col_1": [1, 2]}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = Dataset.from_list([] )
self.assertEqual(len(UpperCAmelCase ) , 0 )
self.assertListEqual(dset.column_names , [] )
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE__ = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any=None ):
'''simple docstring'''
if subparsers is not None:
lowercase_ = subparsers.add_parser("tpu-config" , description=_description )
else:
lowercase_ = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
lowercase_ = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=__lowerCamelCase , default=__lowerCamelCase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=__lowerCamelCase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=__lowerCamelCase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
lowercase_ = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=__lowerCamelCase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=__lowerCamelCase )
return parser
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] ):
'''simple docstring'''
lowercase_ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__lowerCamelCase ):
lowercase_ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowercase_ = defaults.command_file
if not args.command and defaults.commands is not None:
lowercase_ = defaults.commands
if not args.tpu_name:
lowercase_ = defaults.tpu_name
if not args.tpu_zone:
lowercase_ = defaults.tpu_zone
if args.accelerate_version == "dev":
lowercase_ = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
lowercase_ = "accelerate -U"
elif isinstance(parse(args.accelerate_version ) , __lowerCamelCase ):
lowercase_ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file , "r" ) as f:
lowercase_ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __lowerCamelCase ):
lowercase_ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowercase_ = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
lowercase_ = "; ".join(__lowerCamelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowercase_ = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
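    # The assembled command looks roughly like (placeholders are illustrative):
    #   gcloud [alpha] compute tpus tpu-vm ssh <tpu_name> --zone <tpu_zone> \
    #       --command "cd /usr/share; ..." --worker all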
if args.debug:
print(F'Running {" ".join(__lowerCamelCase )}' )
return
subprocess.run(__lowerCamelCase )
print("Successfully setup pod." )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = tpu_command_parser()
lowercase_ = parser.parse_args()
tpu_command_launcher(__lowerCamelCase )
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
lowercase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase_ = DDPMScheduler()
lowercase_ = AudioDiffusionPipeline(vqvae=UpperCAmelCase , unet=self.dummy_unet , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 , return_dict=UpperCAmelCase )
lowercase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase_ = DDIMScheduler()
lowercase_ = self.dummy_vqvae_and_unet
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(raw_audio=UpperCAmelCase , generator=UpperCAmelCase , start_step=5 , steps=10 )
lowercase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase_ = self.dummy_unet_condition
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = torch.rand((1, 1, 10) )
lowercase_ = pipe(generator=UpperCAmelCase , encoding=UpperCAmelCase )
lowercase_ = output.images[0]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device
lowercase_ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
lowercase_ = -1
lowercase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
lowercase_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
lowercase_ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase_ = TextStreamer(UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
lowercase_ = -1
lowercase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
lowercase_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
lowercase_ = tokenizer.decode(greedy_ids[0] )
lowercase_ = TextIteratorStreamer(UpperCAmelCase )
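        # TextIteratorStreamer pattern: generate() runs in a background thread
        # while the main thread drains decoded text from the streamer's queue.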
lowercase_ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase_ = Thread(target=model.generate , kwargs=UpperCAmelCase )
thread.start()
lowercase_ = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
lowercase_ = -1
lowercase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
lowercase_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
lowercase_ = greedy_ids[:, input_ids.shape[1] :]
lowercase_ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase_ = TextStreamer(UpperCAmelCase , skip_prompt=UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = AutoTokenizer.from_pretrained("distilgpt2" )
lowercase_ = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(UpperCAmelCase )
lowercase_ = -1
lowercase_ = torch.ones((1, 5) , device=UpperCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase_ = TextStreamer(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=1 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase_ = cs.out[:-1] # Remove the final "\n"
lowercase_ = tokenizer(UpperCAmelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
lowercase_ = -1
lowercase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
lowercase_ = TextIteratorStreamer(UpperCAmelCase , timeout=0.001 )
lowercase_ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase_ = Thread(target=model.generate , kwargs=UpperCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCAmelCase ):
lowercase_ = ""
for new_text in streamer:
streamer_text += new_text
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
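# Worked example: 28 has proper divisors 1, 2, 4, 7 and 14, which sum to 28,
# so the check above returns True for 28.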
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
SCREAMING_SNAKE_CASE__ = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
SCREAMING_SNAKE_CASE__ = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=1 ) -> Optional[int]:
'''simple docstring'''
lowercase_ = tokenizer
lowercase_ = dataset
lowercase_ = len(UpperCAmelCase ) if n_tasks is None else n_tasks
lowercase_ = n_copies
def __iter__( self ) -> int:
'''simple docstring'''
lowercase_ = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
lowercase_ = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = start_length
lowercase_ = eof_strings
lowercase_ = tokenizer
def __call__( self , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowercase_ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
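    # Split the generation on any EOF marker (e.g. "\nclass", "\ndef") and drop
    # the final marker plus whatever follows it, keeping only the finished body.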
lowercase_ = re.split("(%s)" % "|".join(__lowerCamelCase ) , __lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict , __lowerCamelCase: int , __lowerCamelCase: int=20 , **__lowerCamelCase: List[str] ):
'''simple docstring'''
lowercase_ = defaultdict(__lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__lowerCamelCase ) ):
with torch.no_grad():
lowercase_ = batch["ids"].shape[-1]
lowercase_ = accelerator.unwrap_model(__lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=__lowerCamelCase , **__lowerCamelCase )
# each task is generated batch_size times
lowercase_ = batch["task_id"].repeat(__lowerCamelCase )
lowercase_ = accelerator.pad_across_processes(
__lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowercase_ , lowercase_ = accelerator.gather((generated_tokens, generated_tasks) )
lowercase_ = generated_tokens.cpu().numpy()
lowercase_ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__lowerCamelCase , __lowerCamelCase ):
gen_token_dict[task].append(__lowerCamelCase )
lowercase_ = [[] for _ in range(__lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowercase_ = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
code_gens[task].append(remove_last_block(__lowerCamelCase ) )
return code_gens
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = HfArgumentParser(__lowerCamelCase )
lowercase_ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowercase_ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowercase_ = "false"
if args.num_workers is None:
lowercase_ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowercase_ = Accelerator()
set_seed(args.seed , device_specific=__lowerCamelCase )
# Load model and tokenizer
lowercase_ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowercase_ = tokenizer.eos_token
lowercase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowercase_ = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __lowerCamelCase , __lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowercase_ = load_dataset("openai_humaneval" )
lowercase_ = load_metric("code_eval" )
lowercase_ = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
lowercase_ = args.n_samples // args.batch_size
lowercase_ = TokenizedDataset(__lowerCamelCase , human_eval["test"] , n_copies=__lowerCamelCase , n_tasks=__lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowercase_ = DataLoader(__lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowercase_ = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
lowercase_ , lowercase_ = accelerator.prepare(__lowerCamelCase , __lowerCamelCase )
lowercase_ = complete_code(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , n_tasks=__lowerCamelCase , batch_size=args.batch_size , **__lowerCamelCase , )
if accelerator.is_main_process:
lowercase_ = []
for task in tqdm(range(__lowerCamelCase ) ):
lowercase_ = human_eval["test"][task]["test"]
lowercase_ = F'check({human_eval["test"][task]["entry_point"]})'
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
lowercase_ , lowercase_ = code_eval_metric.compute(
references=__lowerCamelCase , predictions=__lowerCamelCase , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
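# Builds a small FocalNet config plus random pixel inputs and labels for the tests below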
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[32, 64, 128] , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2"] , UpperCAmelCase=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = embed_dim
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
lowercase_ = mlp_ratio
lowercase_ = qkv_bias
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = drop_path_rate
lowercase_ = hidden_act
lowercase_ = use_absolute_embeddings
lowercase_ = patch_norm
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = is_training
lowercase_ = scope
lowercase_ = use_labels
lowercase_ = type_sequence_label_size
lowercase_ = encoder_stride
lowercase_ = out_features
lowercase_ = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = FocalNetModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
lowercase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = FocalNetForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 , has_text_modality=UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.hidden_states
lowercase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# FocalNet has a different seq_length
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = reshaped_hidden_states[0].shape
lowercase_ = (
reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = FocalNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
# verify the logits
lowercase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = FocalNetConfig
lowerCAmelCase__ = False
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
SCREAMING_SNAKE_CASE__ = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] , __lowerCamelCase: Union[str, Any]=False ):
'''simple docstring'''
lowercase_ , lowercase_ = create_model(
"HTSAT-tiny" , "roberta" , __lowerCamelCase , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=__lowerCamelCase , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
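# Map CLAP checkpoint keys onto the transformers naming scheme: rename modules, flatten sequential projection layers, and split fused qkv weights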
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] ):
'''simple docstring'''
lowercase_ = {}
lowercase_ = r".*sequential.(\d+).*"
lowercase_ = r".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowercase_ = key.replace(__lowerCamelCase , __lowerCamelCase )
if re.match(__lowerCamelCase , __lowerCamelCase ):
# replace sequential layers with list
lowercase_ = re.match(__lowerCamelCase , __lowerCamelCase ).group(1 )
lowercase_ = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(sequential_layer )//3}.linear.' )
elif re.match(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = int(re.match(__lowerCamelCase , __lowerCamelCase ).group(1 ) )
# In CLAP the projection is an nn.Sequential, so checkpoint index 0 maps to linear1 and index 2 to linear2
lowercase_ = 1 if projecton_layer == 0 else 2
lowercase_ = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
lowercase_ = value
lowercase_ = mixed_qkv.size(0 ) // 3
lowercase_ = mixed_qkv[:qkv_dim]
lowercase_ = mixed_qkv[qkv_dim : qkv_dim * 2]
lowercase_ = mixed_qkv[qkv_dim * 2 :]
lowercase_ = query_layer
lowercase_ = key_layer
lowercase_ = value_layer
else:
lowercase_ = value
return model_state_dict
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int]=False ):
'''simple docstring'''
lowercase_ , lowercase_ = init_clap(__lowerCamelCase , enable_fusion=__lowerCamelCase )
clap_model.eval()
lowercase_ = clap_model.state_dict()
lowercase_ = rename_state_dict(__lowerCamelCase )
lowercase_ = ClapConfig()
lowercase_ = enable_fusion
lowercase_ = ClapModel(__lowerCamelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
transformers_config.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
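# Rust-backed GPT-2 BPE tokenizer; add_prefix_space must be set at construction time to encode pretokenized inputs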
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = GPTaTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = kwargs.pop("add_bos_token" , UpperCAmelCase )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[int]:
'''simple docstring'''
lowercase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [self.eos_token_id] )
if len(UpperCAmelCase ) > self.model_max_length:
lowercase_ = input_ids[-self.model_max_length :]
return input_ids
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any ):
'''simple docstring'''
with open(__lowerCamelCase , "r" ) as f:
lowercase_ = f.read().splitlines()
return [l.strip() for l in lines]
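# Whitespace tokenizer over the ESM vocabulary loaded above; ids map one-to-one to lines of vocab.txt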
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="<eos>" , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = load_vocab_file(UpperCAmelCase )
lowercase_ = dict(enumerate(self.all_tokens ) )
lowercase_ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase_ = unk_token
lowercase_ = cls_token
lowercase_ = pad_token
lowercase_ = mask_token
lowercase_ = eos_token
lowercase_ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return text.split()
def A__ ( self , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
return len(self._id_to_token )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.cls_token_id]
lowercase_ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase_ = [1] + ([0] * len(UpperCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(UpperCAmelCase ) + [1]
return mask
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = os.path.join(UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(UpperCAmelCase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> int:
'''simple docstring'''
return super()._add_tokens(UpperCAmelCase , special_tokens=UpperCAmelCase )
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] ):
'''simple docstring'''
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: Any="attention" ):
'''simple docstring'''
lowercase_ = lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
lowercase_ = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
lowercase_ = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
lowercase_ = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowercase_ = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
lowercase_ = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any]=False ):
'''simple docstring'''
if split_mlp_wi:
lowercase_ = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
lowercase_ = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
lowercase_ = (wi_a, wi_a)
else:
lowercase_ = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
lowercase_ = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
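# Convert the flattened T5X parameter dict into a PyTorch-style state dict: shared embeddings, encoder blocks, then (unless encoder-only) decoder blocks and the LM head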
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: dict , *, __lowerCamelCase: int , __lowerCamelCase: bool , __lowerCamelCase: bool = False ):
'''simple docstring'''
lowercase_ = traverse_util.flatten_dict(variables["target"] )
lowercase_ = {"/".join(__lowerCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase_ = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , __lowerCamelCase )
lowercase_ = collections.OrderedDict()
# Shared embeddings.
lowercase_ = old["token_embedder/embedding"]
# Encoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 1 (MLP).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_mlp_layer_norm" )
lowercase_ , lowercase_ = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , __lowerCamelCase )
lowercase_ = layer_norm
if split_mlp_wi:
lowercase_ = wi[0].T
lowercase_ = wi[1].T
else:
lowercase_ = wi.T
lowercase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , __lowerCamelCase , "encoder" ).T
lowercase_ = old["encoder/encoder_norm/scale"]
if not scalable_attention:
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , 0 , "encoder" ).T
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_self_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "self_attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 1 (Cross Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_cross_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "encoder_decoder_attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 2 (MLP).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_mlp_layer_norm" )
lowercase_ , lowercase_ = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , __lowerCamelCase )
lowercase_ = layer_norm
if split_mlp_wi:
lowercase_ = wi[0].T
lowercase_ = wi[1].T
else:
lowercase_ = wi.T
lowercase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ = tax_relpos_bias_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" ).T
lowercase_ = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase_ = old["decoder/logits_dense/kernel"].T
return new
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: bool ):
'''simple docstring'''
lowercase_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase_ = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase_ = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
lowercase_ = state_dict["shared.weight"]
return state_dict
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Any ):
'''simple docstring'''
lowercase_ = checkpoints.load_tax_checkpoint(__lowerCamelCase )
lowercase_ = convert_tax_to_pytorch(
__lowerCamelCase , num_layers=config.num_layers , is_encoder_only=__lowerCamelCase , scalable_attention=__lowerCamelCase )
lowercase_ = make_state_dict(__lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
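# Build the HF model from the JSON config, load the converted T5X weights, save it, and reload as a sanity check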
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , ):
'''simple docstring'''
lowercase_ = MTaConfig.from_json_file(__lowerCamelCase )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase_ = UMTaEncoderModel(__lowerCamelCase )
else:
lowercase_ = UMTaForConditionalGeneration(__lowerCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__lowerCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(__lowerCamelCase )
print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
'''simple docstring'''
if not isinstance(__lowerCamelCase , int ):
raise TypeError("Input value must be an 'int' type" )
if __lowerCamelCase < 0:
raise ValueError("Input value must be a positive integer" )
return bin(__lowerCamelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """https://openaipublic.azureedge.net/jukebox/models/"""
SCREAMING_SNAKE_CASE__ = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
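# Translate a single Jukebox checkpoint key into the transformers naming scheme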
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
lowercase_ = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
lowercase_ = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
lowercase_ = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
lowercase_ = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
lowercase_ = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
lowercase_ = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowercase_ = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
lowercase_ = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
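# Rewrite VQ-VAE encoder/decoder and prior-conditioner keys with regexes, reporting any keys whose names or shapes do not match the model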
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] ):
'''simple docstring'''
lowercase_ = {}
import re
lowercase_ = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
lowercase_ = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowercase_ = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
lowercase_ = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
lowercase_ = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowercase_ = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
lowercase_ = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
lowercase_ = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowercase_ = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__lowerCamelCase ):
lowercase_ = re_encoder_block_conv_in.match(__lowerCamelCase )
lowercase_ = regex_match.groups()
lowercase_ = int(groups[2] ) * 2 + int(groups[3] )
lowercase_ = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
lowercase_ = re_encoder_block_conv_in.sub(__lowerCamelCase , __lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(__lowerCamelCase ):
lowercase_ = re_encoder_block_resnet.match(__lowerCamelCase )
lowercase_ = regex_match.groups()
lowercase_ = int(groups[2] ) * 2 + int(groups[3] )
lowercase_ = {"1": 1, "3": 2}[groups[-2]]
lowercase_ = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
lowercase_ = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
lowercase_ = prefix + resnet_block
lowercase_ = re_encoder_block_resnet.sub(__lowerCamelCase , __lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(__lowerCamelCase ):
lowercase_ = re_encoder_block_proj_out.match(__lowerCamelCase )
lowercase_ = regex_match.groups()
lowercase_ = F'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
lowercase_ = re_encoder_block_proj_out.sub(__lowerCamelCase , __lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__lowerCamelCase ):
lowercase_ = re_decoder_block_conv_out.match(__lowerCamelCase )
lowercase_ = regex_match.groups()
lowercase_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowercase_ = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
lowercase_ = re_decoder_block_conv_out.sub(__lowerCamelCase , __lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(__lowerCamelCase ):
lowercase_ = re_decoder_block_resnet.match(__lowerCamelCase )
lowercase_ = regex_match.groups()
lowercase_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowercase_ = {"1": 1, "3": 2}[groups[-2]]
lowercase_ = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
lowercase_ = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
lowercase_ = prefix + resnet_block
lowercase_ = re_decoder_block_resnet.sub(__lowerCamelCase , __lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(__lowerCamelCase ):
lowercase_ = re_decoder_block_proj_in.match(__lowerCamelCase )
lowercase_ = regex_match.groups()
lowercase_ = F'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
lowercase_ = re_decoder_block_proj_in.sub(__lowerCamelCase , __lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__lowerCamelCase ):
lowercase_ = re_prior_cond_conv_out.match(__lowerCamelCase )
lowercase_ = regex_match.groups()
lowercase_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowercase_ = F'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
lowercase_ = re_prior_cond_conv_out.sub(__lowerCamelCase , __lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(__lowerCamelCase ):
lowercase_ = re_prior_cond_resnet.match(__lowerCamelCase )
lowercase_ = regex_match.groups()
lowercase_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowercase_ = {"1": 1, "3": 2}[groups[-2]]
lowercase_ = F'conditioner_blocks.upsampler.upsample_block.{block_index}.'
lowercase_ = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
lowercase_ = prefix + resnet_block
lowercase_ = re_prior_cond_resnet.sub(__lowerCamelCase , __lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(__lowerCamelCase ):
lowercase_ = re_prior_cond_proj_in.match(__lowerCamelCase )
lowercase_ = regex_match.groups()
lowercase_ = F'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
lowercase_ = re_prior_cond_proj_in.sub(__lowerCamelCase , __lowerCamelCase )
# keep original key
else:
lowercase_ = original_key
lowercase_ = replace_key(__lowerCamelCase )
if F'{key_prefix}.{key}' not in model_state_dict or key is None:
print(F'failed converting {original_key} to {key}, does not match' )
# handle mismatched shapes
elif value.shape != model_state_dict[F'{key_prefix}.{key}'].shape:
lowercase_ = model_state_dict[F'{key_prefix}.{key}']
print(F'{original_key} -> {key}: \nshape {val.shape} and {value.shape} do not match' )
lowercase_ = original_key
lowercase_ = original_key
lowercase_ = value
return new_dict
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple=None , __lowerCamelCase: List[str]=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
lowercase_ = requests.get(F'{PREFIX}{file}' , allow_redirects=__lowerCamelCase )
os.makedirs(F'{pytorch_dump_folder_path}/' , exist_ok=__lowerCamelCase )
open(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , "wb" ).write(r.content )
lowercase_ = MODEL_MAPPING[model_name.split("/" )[-1]]
lowercase_ = JukeboxConfig.from_pretrained(__lowerCamelCase )
lowercase_ = JukeboxModel(__lowerCamelCase )
lowercase_ = []
lowercase_ = {}
for i, dict_name in enumerate(__lowerCamelCase ):
lowercase_ = torch.load(F'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["model"]
lowercase_ = {}
for k in old_dic.keys():
if k.endswith(".b" ):
lowercase_ = old_dic[k]
elif k.endswith(".w" ):
lowercase_ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowercase_ = old_dic[k]
else:
lowercase_ = old_dic[k]
lowercase_ = "vqvae" if i == 0 else F'priors.{3 - i}'
lowercase_ = fix_jukebox_keys(__lowerCamelCase , model.state_dict() , __lowerCamelCase , __lowerCamelCase )
weight_dict.append(__lowerCamelCase )
lowercase_ = weight_dict.pop(0 )
model.vqvae.load_state_dict(__lowerCamelCase )
for i in range(len(__lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
with open(F'{pytorch_dump_folder_path}/mapping.json' , "w" ) as txtfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
return weight_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 42
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self , UpperCAmelCase = 16 , UpperCAmelCase = 88 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 32 , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = "geglu" , UpperCAmelCase = True , UpperCAmelCase = True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowercase_ = num_attention_heads
lowercase_ = attention_head_dim
lowercase_ = num_attention_heads * attention_head_dim
lowercase_ = in_channels
lowercase_ = torch.nn.GroupNorm(num_groups=UpperCAmelCase , num_channels=UpperCAmelCase , eps=1e-6 , affine=UpperCAmelCase )
lowercase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
# 3. Define transformer blocks
lowercase_ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , dropout=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , activation_fn=UpperCAmelCase , attention_bias=UpperCAmelCase , double_self_attention=UpperCAmelCase , norm_elementwise_affine=UpperCAmelCase , )
for d in range(UpperCAmelCase )
] )
lowercase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=1 , UpperCAmelCase=None , UpperCAmelCase = True , ) -> Optional[Any]:
'''simple docstring'''
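# 1. Input: unfold the frame axis from the batch and flatten the spatial grid so each location attends across frames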
lowercase_ , lowercase_ , lowercase_ , lowercase_ = hidden_states.shape
lowercase_ = batch_frames // num_frames
lowercase_ = hidden_states
lowercase_ = hidden_states[None, :].reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowercase_ = self.norm(UpperCAmelCase )
lowercase_ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCAmelCase , UpperCAmelCase )
lowercase_ = self.proj_in(UpperCAmelCase )
# 2. Blocks
for block in self.transformer_blocks:
lowercase_ = block(
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , class_labels=UpperCAmelCase , )
# 3. Output
lowercase_ = self.proj_out(UpperCAmelCase )
lowercase_ = (
hidden_states[None, None, :]
.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowercase_ = hidden_states.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCAmelCase )
from PIL import Image
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Image , __lowerCamelCase: int ):
'''simple docstring'''
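# standard contrast correction factor; level is expected to lie in [-255, 255]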
lowercase_ = (259 * (level + 255)) / (255 * (259 - level))
def contrast(__lowerCamelCase: int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(__lowerCamelCase )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
SCREAMING_SNAKE_CASE__ = change_contrast(img, 1_7_0)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def A__ ( self , UpperCAmelCase ) -> float:
'''simple docstring'''
return 0.0
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: np.ndarray , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
lowercase_ = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: FilterType , __lowerCamelCase: int ):
'''simple docstring'''
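# feed a unit impulse through the filter, zero-pad, and FFT the response to obtain the gain per frequency bin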
lowercase_ = 512
lowercase_ = [1] + [0] * (size - 1)
lowercase_ = [filter_type.process(item ) for item in inputs]
lowercase_ = [0] * (samplerate - size) # zero-padding
outputs += filler
lowercase_ = np.abs(np.fft.fft(__lowerCamelCase ) )
lowercase_ = 20 * np.log10(__lowerCamelCase )
# Frequencies on log scale from 24 Hz to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
lowercase_ = get_bounds(__lowerCamelCase , __lowerCamelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(__lowerCamelCase )
plt.show()
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: FilterType , __lowerCamelCase: int ):
'''simple docstring'''
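# same impulse-response measurement, but plot the unwrapped FFT phase instead of the magnitude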
lowercase_ = 512
lowercase_ = [1] + [0] * (size - 1)
lowercase_ = [filter_type.process(__lowerCamelCase ) for item in inputs]
lowercase_ = [0] * (samplerate - size) # zero-padding
outputs += filler
lowercase_ = np.angle(np.fft.fft(__lowerCamelCase ) )
# Frequencies on log scale from 24 Hz to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(__lowerCamelCase , -2 * pi ) )
plt.show()
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = JukeboxTokenizer
lowerCAmelCase__ = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
import torch
lowercase_ = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
lowercase_ = tokenizer(**self.metas )["input_ids"]
# fmt: off
lowercase_ = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def A__ ( self ) -> Tuple:
'''simple docstring'''
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
        tokens = tokenizer(**self.metas )["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
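# ---------------------------------------------------------------------------
# Minimal usage sketch of the tokenizer exercised above. Running it requires
# network access to download the checkpoint files; the lyrics string is
# shortened here for illustration.
#
#   from transformers import JukeboxTokenizer
#
#   tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#   encoded = tokenizer(artist="Zac Brown Band", genres="Country",
#                       lyrics="I met a traveller from an antique land,")
#   input_ids = encoded["input_ids"]  # a list of three tensors, one per prior
# ---------------------------------------------------------------------------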
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = MvpTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = "post_processor"
lowercase_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["sep"] )
if "cls" in state:
lowercase_ = tuple(state["cls"] )
lowercase_ = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(UpperCAmelCase , state.pop("type" ) )
lowercase_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def A__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
lowercase_ = value
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
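# A minimal standalone sketch of the special-token layout built by the two
# helpers above (`<s> A </s> </s> B </s>` with all-zero token type ids),
# assuming hypothetical ids bos=0 and eos=2:
def _demo_special_token_layout(a_ids, b_ids=None, bos_id=0, eos_id=2):
    # Mirrors the build_inputs_with_special_tokens logic in the class above.
    output = [bos_id] + a_ids + [eos_id]
    if b_ids is None:
        return output
    return output + [eos_id] + b_ids + [eos_id]


assert _demo_special_token_layout([5, 6]) == [0, 5, 6, 2]
assert _demo_special_token_layout([5, 6], [7]) == [0, 5, 6, 2, 2, 7, 2]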
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    """Interface for a single-sample audio filter."""

    def process(self, sample: float) -> float:
        """Calculate y[n] from the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[float, float]:
    """Return sensible y-axis bounds for the magnitude plot."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's magnitude response by FFT-ing its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 Hz to the Nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's phase response by FFT-ing its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 Hz to the Nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
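# Any object exposing `process(sample) -> float` satisfies the FilterType
# protocol above. A minimal sketch: an identity (all-pass) filter, which
# should plot a flat 0 dB magnitude response and zero phase shift. The
# plotting calls are left commented out because they open matplotlib windows.
class IdentityFilter:
    """Pass every sample through unchanged."""

    def process(self, sample: float) -> float:
        return sample


# show_frequency_response(IdentityFilter(), samplerate=48_000)
# show_phase_response(IdentityFilter(), samplerate=48_000)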
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def A__ ( self ) -> Dict:
'''simple docstring'''
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
lowercase_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowercase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase , projection_dim=UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowercase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase )
lowercase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase , layers_per_block=1 , upcast_attention=UpperCAmelCase , use_linear_projection=UpperCAmelCase , )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL()
lowercase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=True ) -> Tuple:
'''simple docstring'''
        if str(UpperCAmelCase ).startswith("mps" ):
            generator = torch.manual_seed(UpperCAmelCase )
        else:
            generator = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableUnCLIPImgaImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
inputs.update({"image_embeds": None} )
lowercase_ = sd_pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(UpperCAmelCase , "anime turtle" , generator=UpperCAmelCase , output_type="np" )
        image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(UpperCAmelCase , "anime turtle" , generator=UpperCAmelCase , output_type="np" )
        image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
        pipe = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = pipe(
UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
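# ---------------------------------------------------------------------------
# The integration tests above share one memory-saving recipe before sampling
# on a V100-class GPU. A minimal usage sketch (needs CUDA and network access;
# the checkpoint name is the one used in the tests):
#
#   pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
#       "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
#   pipe.enable_attention_slicing()        # trade speed for lower peak memory
#   pipe.enable_sequential_cpu_offload()   # keep submodules on CPU until used
#   image = pipe(init_image, "anime turtle", output_type="np").images[0]
# ---------------------------------------------------------------------------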
SCREAMING_SNAKE_CASE__ = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
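# Every import guard above follows the same try/except shape: when an
# optional dependency is missing, dummy placeholder objects are imported in
# place of the real classes so that importing the package never crashes. A
# minimal generic sketch of the pattern (all names here are hypothetical):
class _ExampleUnavailableError(Exception):
    pass


def _is_example_available() -> bool:
    return False  # pretend the optional dependency is missing


try:
    if not _is_example_available():
        raise _ExampleUnavailableError()
except _ExampleUnavailableError:
    EXAMPLE_FEATURE = None  # the real package would import dummy objects here
else:
    EXAMPLE_FEATURE = object()  # the real package imports the implementation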
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase_ = 1.0 if scale is None else scale
lowercase_ = 0.0 if loc is None else loc
super().__init__(UpperCAmelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=UpperCAmelCase )] )
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def A__ ( self ) -> str:
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return self.variance.sqrt()
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = args_dim
lowercase_ = nn.ModuleList([nn.Linear(UpperCAmelCase , UpperCAmelCase ) for dim in args_dim.values()] )
lowercase_ = domain_map
def A__ ( self , UpperCAmelCase ) -> Tuple[torch.Tensor]:
'''simple docstring'''
lowercase_ = [proj(UpperCAmelCase ) for proj in self.proj]
return self.domain_map(*UpperCAmelCase )
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase_ = function
def A__ ( self , UpperCAmelCase , *UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return self.function(UpperCAmelCase , *UpperCAmelCase )
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , UpperCAmelCase = 1 ) -> None:
'''simple docstring'''
lowercase_ = dim
lowercase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*UpperCAmelCase )
else:
return Independent(self.distribution_class(*UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Distribution:
'''simple docstring'''
lowercase_ = self._base_distribution(UpperCAmelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(UpperCAmelCase , loc=UpperCAmelCase , scale=UpperCAmelCase , event_dim=self.event_dim )
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def A__ ( self ) -> float:
'''simple docstring'''
return 0.0
def A__ ( self , UpperCAmelCase ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=UpperCAmelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def A__ ( self , *UpperCAmelCase ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def A__ ( UpperCAmelCase ) -> torch.Tensor:
'''simple docstring'''
return (x + torch.sqrt(torch.square(UpperCAmelCase ) + 4.0 )) / 2.0
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase__ = StudentT
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
lowercase_ = 2.0 + cls.squareplus(UpperCAmelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"loc": 1, "scale": 1}
lowerCAmelCase__ = Normal
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"total_count": 1, "logits": 1}
lowerCAmelCase__ = NegativeBinomial
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def A__ ( self , UpperCAmelCase ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase )
else:
return Independent(self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
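# A standalone check of the squareplus mapping used by the domain maps above,
# y = (x + sqrt(x^2 + 4)) / 2, which smoothly maps the reals onto the strictly
# positive reals (suitable for scale and count parameters):
def _demo_squareplus(x: torch.Tensor) -> torch.Tensor:
    # Mirrors the staticmethod defined in the base output class above.
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


_x = torch.tensor([-3.0, 0.0, 3.0])
assert (_demo_squareplus(_x) > 0).all()  # strictly positive everywhere
assert torch.isclose(_demo_squareplus(torch.tensor(0.0)), torch.tensor(1.0))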
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    """Check that Prim's algorithm recovers every expected MST edge."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
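# The MST of an undirected graph has no edge orientation, so the assertion
# above accepts either (u, v) or (v, u). A tiny standalone illustration of
# the same check with made-up data:
def _demo_orientation_check() -> None:
    found = {(7, 6), (2, 8)}
    for u, v, _cost in ([6, 7, 1], [2, 8, 2]):
        assert (u, v) in found or (v, u) in found


_demo_orientation_check()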
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches: bool = True) -> Accelerator:
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Build a dataloader and verify that each process sees the expected batch sizes."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator: Accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
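# This script asserts num_processes == 2, so it is meant to be run under a
# two-process distributed launcher, e.g. (the script filename is whatever
# this file is saved as):
#
#   accelerate launch --num_processes 2 test_even_batches.py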
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""allenai/led-base-16384""": 1_6_3_8_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = LEDTokenizer
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = "post_processor"
lowercase_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["sep"] )
if "cls" in state:
lowercase_ = tuple(state["cls"] )
lowercase_ = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(UpperCAmelCase , state.pop("type" ) )
lowercase_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def A__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
lowercase_ = value
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> int:
'''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase = None , UpperCAmelCase = None , ) -> dict:
'''simple docstring'''
        encoded_inputs = super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase_ = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
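# A standalone sketch of the padding branch above: the -1 fill value keeps
# padded positions distinct from LED's 0 ("local attention") and
# 1 ("global attention") values.
def _demo_pad_global_attention_mask(mask, target_len, padding_side="right"):
    difference = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask


assert _demo_pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert _demo_pad_global_attention_mask([1, 0], 4, "left") == [-1, -1, 1, 0]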
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
        return image
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        return model
@property
def A__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase )
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
def extract(*UpperCAmelCase , **UpperCAmelCase ):
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = torch.ones([0] )
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
self.pixel_values.to(UpperCAmelCase )
return self
return Out()
return extract
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
lowercase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
# put models in fp16
lowercase_ = unet.half()
lowercase_ = vae.half()
lowercase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ = init_image.resize((760, 504) )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
lowercase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase_ = init_image.resize((768, 512) )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so we only check a loose max-abs tolerance here
assert np.abs(expected_image - image ).max() < 1e-2
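
# --- Editor's note: a minimal, self-contained sketch (not part of the test
# suite above) of the slice-comparison pattern these pipeline tests rely on:
# pin a small corner of the output image and allow a loose tolerance, since
# full-image equality is not reproducible across GPUs. The helper name is
# hypothetical.
import numpy as np

def assert_slice_close(image, expected_slice, atol: float = 5e-3) -> None:
    actual = image[0, -3:, -3:, -1].flatten()
    max_diff = float(np.abs(actual - np.asarray(expected_slice).flatten()).max())
    assert max_diff < atol, f"max diff {max_diff:.4f} exceeds tolerance {atol}"
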
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = """▁"""
SCREAMING_SNAKE_CASE__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = BigBirdTokenizer
lowerCAmelCase__ = BigBirdTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def A__ ( self ) -> str:
'''simple docstring'''
super().setUp()
lowercase_ = self.tokenizer_class(UpperCAmelCase , keep_accents=UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = "<s>"
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(UpperCAmelCase ) , 1004 )
def A__ ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = "I was born in 92000, and this is falsé."
lowercase_ = tokenizer.tokenize(UpperCAmelCase )
lowercase_ = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowercase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowercase_ = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(UpperCAmelCase )
lowercase_ = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = BigBirdTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
lowercase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
lowercase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = "Hello World!"
lowercase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@slow
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
lowercase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@require_torch
@slow
def A__ ( self ) -> str:
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase_ = " ".join(UpperCAmelCase )
lowercase_ = self.big_tokenizer.encode_plus(UpperCAmelCase , return_tensors="pt" , return_token_type_ids=UpperCAmelCase )
lowercase_ = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=UpperCAmelCase )
lowercase_ = BigBirdConfig(attention_type="original_full" )
lowercase_ = BigBirdModel(UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase )
model(**UpperCAmelCase )
@slow
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
lowercase_ = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def A__ ( self ) -> List[Any]:
        '''simple docstring'''
        # fmt: off
lowercase_ = {"input_ids": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
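
# --- Editor's note: a pure-Python sketch of the special-token wrapping the
# expected encodings above exhibit (65 = [CLS], 66 = [SEP] in this vocab);
# the helper is hypothetical, not part of the test class.
def add_special_tokens(ids, cls_id=65, sep_id=66):
    return [cls_id] + list(ids) + [sep_id]

# "Hello World!" tokenizes to [18536, 2260, 101], matching the slow test above:
assert add_special_tokens([18536, 2260, 101]) == [65, 18536, 2260, 101, 66]
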
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=23 , UpperCAmelCase=11 , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = act_dim
lowercase_ = state_dim
lowercase_ = hidden_size
lowercase_ = max_length
lowercase_ = is_training
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowercase_ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DecisionTransformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring a failing test from GenerationTesterMixin, as the model does not use input_ids
lowerCAmelCase__ = False
    # Ignoring failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = DecisionTransformerModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DecisionTransformerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = 2 # number of steps of autoregressive prediction we will perform
lowercase_ = 10 # defined by the RL environment, may be normalized
lowercase_ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
lowercase_ = model.to(UpperCAmelCase )
lowercase_ = model.config
torch.manual_seed(0 )
lowercase_ = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ) # env.reset()
lowercase_ = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=UpperCAmelCase )
lowercase_ = torch.tensor(UpperCAmelCase , device=UpperCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowercase_ = state
lowercase_ = torch.zeros(1 , 0 , config.act_dim , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.zeros(1 , 0 , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.tensor(0 , device=UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCAmelCase ):
lowercase_ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase_ , lowercase_ , lowercase_ = model(
states=UpperCAmelCase , actions=UpperCAmelCase , rewards=UpperCAmelCase , returns_to_go=UpperCAmelCase , timesteps=UpperCAmelCase , attention_mask=UpperCAmelCase , return_dict=UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase_ = action_pred[0, -1]
lowercase_ = torch.cat([states, state] , dim=1 )
lowercase_ = returns_to_go[0, -1] - reward
lowercase_ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase_ = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
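
# --- Editor's note: a pure-Python sketch (no model) of the returns-to-go
# bookkeeping the autoregressive loop above performs: the target return is
# decremented by each observed reward, which is what
# `returns_to_go[0, -1] - reward` computes before being re-appended.
def update_returns_to_go(target_return, rewards):
    rtg = [target_return]
    for r in rewards:
        rtg.append(rtg[-1] - r)
    return rtg

assert update_returns_to_go(10.0, [1.0, 1.0]) == [10.0, 9.0, 8.0]
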
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = GPTaTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = kwargs.pop("add_bos_token" , UpperCAmelCase )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[int]:
'''simple docstring'''
lowercase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [self.eos_token_id] )
if len(UpperCAmelCase ) > self.model_max_length:
lowercase_ = input_ids[-self.model_max_length :]
return input_ids
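
# --- Editor's note: a sketch of the conversation flattening implemented in
# the last method above, with a stand-in `encode` callable (hypothetical):
# each turn is encoded, an EOS id is appended, and only the trailing
# `model_max_length` ids are kept.
def build_conversation_ids(turns, encode, eos_id, model_max_length):
    ids = []
    for text in turns:
        ids.extend(encode(text) + [eos_id])
    return ids[-model_max_length:]

assert build_conversation_ids(["hi", "yo"], lambda t: [1, 2], 0, 3) == [1, 2, 0]
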
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
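
# --- Editor's note: a toy sketch of the lazy-import pattern above; this is a
# stand-in for illustration, not the real transformers `_LazyModule`. The
# module object is replaced by a proxy that resolves each public symbol to
# its submodule on first attribute access.
import importlib
import types

class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, package, import_structure):
        super().__init__(name)
        self._package = package
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, name):
        submodule = importlib.import_module("." + self._symbol_to_module[name], self._package)
        return getattr(submodule, name)
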
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=14 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=0.02 , ) -> List[Any]:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = rotary_dim
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = initializer_range
lowercase_ = None
lowercase_ = vocab_size - 1
lowercase_ = vocab_size - 1
lowercase_ = vocab_size - 1
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = 20
lowercase_ = model_class_name(UpperCAmelCase )
lowercase_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase )
lowercase_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
lowercase_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowercase_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowercase_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
lowercase_ = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase , )
lowercase_ = model(UpperCAmelCase )
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = 20
lowercase_ = model_class_name(UpperCAmelCase )
lowercase_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowercase_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase )
lowercase_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowercase_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowercase_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
lowercase_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowercase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
lowercase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
@require_flax
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowerCAmelCase__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = FlaxGPTJModelTester(self )
def A__ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase_ , lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase_ , lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@tooslow
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
lowercase_ = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=UpperCAmelCase , truncation=UpperCAmelCase )
lowercase_ = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
lowercase_ = False
lowercase_ = model.config.eos_token_id
lowercase_ = jax.jit(model.generate )
lowercase_ = jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
lowercase_ = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
lowercase_ = [
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@is_pt_flax_cross_test
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowercase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase_ = getattr(UpperCAmelCase , UpperCAmelCase )
lowercase_ , lowercase_ = pt_inputs["input_ids"].shape
lowercase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase ):
lowercase_ = 0
lowercase_ = 1
lowercase_ = 0
lowercase_ = 1
lowercase_ = pt_model_class(UpperCAmelCase ).eval()
lowercase_ = model_class(UpperCAmelCase , dtype=jnp.floataa )
lowercase_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase )
lowercase_ = fx_state
with torch.no_grad():
lowercase_ = pt_model(**UpperCAmelCase ).to_tuple()
lowercase_ = fx_model(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase )
lowercase_ = model_class.from_pretrained(UpperCAmelCase , from_pt=UpperCAmelCase )
lowercase_ = fx_model_loaded(**UpperCAmelCase ).to_tuple()
self.assertEqual(
len(UpperCAmelCase ) , len(UpperCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowercase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase_ = getattr(UpperCAmelCase , UpperCAmelCase )
lowercase_ = pt_model_class(UpperCAmelCase ).eval()
lowercase_ = model_class(UpperCAmelCase , dtype=jnp.floataa )
lowercase_ = load_flax_weights_in_pytorch_model(UpperCAmelCase , fx_model.params )
lowercase_ , lowercase_ = pt_inputs["input_ids"].shape
lowercase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase ):
lowercase_ = 0
lowercase_ = 1
lowercase_ = 0
lowercase_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowercase_ = pt_model(**UpperCAmelCase ).to_tuple()
lowercase_ = fx_model(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase )
lowercase_ = pt_model_class.from_pretrained(UpperCAmelCase , from_flax=UpperCAmelCase )
with torch.no_grad():
lowercase_ = pt_model_loaded(**UpperCAmelCase ).to_tuple()
self.assertEqual(
len(UpperCAmelCase ) , len(UpperCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def A__ ( self ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase_ = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
lowercase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
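
# --- Editor's note: a small numpy sketch of the comparison used throughout
# the cross-framework tests above: only the last sequence position of each
# output is compared, with a loose 4e-2 tolerance, since fp32 kernels differ
# between Flax and PyTorch. The helper name is hypothetical.
import numpy as np

def assert_last_position_close(fx_output, pt_output, tol: float = 4e-2) -> None:
    diff = float(np.abs(np.asarray(fx_output)[:, -1] - np.asarray(pt_output)[:, -1]).max())
    assert diff < tol, f"max diff {diff:.4f} exceeds {tol}"
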
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 1
@register_to_config
def __init__( self , UpperCAmelCase = 1000 , UpperCAmelCase = None ) -> List[Any]:
'''simple docstring'''
self.set_timesteps(UpperCAmelCase )
# standard deviation of the initial noise distribution
lowercase_ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formulas (9), (12), (13) and Algorithm 2.
lowercase_ = 4
# running values
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Optional[int]:
'''simple docstring'''
lowercase_ = num_inference_steps
lowercase_ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
lowercase_ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
lowercase_ = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
lowercase_ = torch.sin(steps * math.pi / 2 ) ** 2
lowercase_ = (1.0 - self.betas**2) ** 0.5
lowercase_ = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
lowercase_ = timesteps.to(UpperCAmelCase )
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
lowercase_ = (self.timesteps == timestep).nonzero().item()
lowercase_ = timestep_index + 1
lowercase_ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCAmelCase )
if len(self.ets ) == 1:
lowercase_ = self.ets[-1]
elif len(self.ets ) == 2:
lowercase_ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowercase_ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
lowercase_ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
lowercase_ = self._get_prev_sample(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = self.alphas[timestep_index]
lowercase_ = self.betas[timestep_index]
lowercase_ = self.alphas[prev_timestep_index]
lowercase_ = self.betas[prev_timestep_index]
lowercase_ = (sample - sigma * ets) / max(UpperCAmelCase , 1e-8 )
lowercase_ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
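
# --- Editor's note: the branch over len(self.ets) in the step method above is
# a 1st- to 4th-order Adams-Bashforth linear multistep rule. The same
# coefficients, isolated as a plain-Python sketch (ets[-1] is the newest
# model output):
AB_COEFFS = {
    1: [1.0],
    2: [3 / 2, -1 / 2],
    3: [23 / 12, -16 / 12, 5 / 12],
    4: [55 / 24, -59 / 24, 37 / 24, -9 / 24],
}

def adams_bashforth_combine(ets):
    order = min(len(ets), 4)
    return sum(c * e for c, e in zip(AB_COEFFS[order], reversed(ets[-order:])))
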
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
SCREAMING_SNAKE_CASE__ = ["""gpt2"""]
SCREAMING_SNAKE_CASE__ = """gpt2"""
if is_tf_available():
class __lowerCamelCase ( tf.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> str:
'''simple docstring'''
super().__init__()
lowercase_ = tokenizer
lowercase_ = AutoConfig.from_pretrained(UpperCAmelCase )
lowercase_ = TFGPTaLMHeadModel.from_config(UpperCAmelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = self.tokenizer(UpperCAmelCase )
lowercase_ = tokenized["input_ids"].to_tensor()
lowercase_ = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowercase_ = self.model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase )["logits"]
return outputs
@require_tf
@require_keras_nlp
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Dict:
'''simple docstring'''
super().setUp()
lowercase_ = [GPTaTokenizer.from_pretrained(UpperCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
lowercase_ = [TFGPTaTokenizer.from_pretrained(UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowercase_ = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
lowercase_ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def A__ ( self ) -> int:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
lowercase_ = tokenizer([test_inputs] , return_tensors="tf" )
lowercase_ = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
lowercase_ = python_outputs[key].numpy()
lowercase_ = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCAmelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def A__ ( self ) -> Dict:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowercase_ = tf.function(UpperCAmelCase )
for test_inputs in self.test_sentences:
lowercase_ = tf.constant(UpperCAmelCase )
lowercase_ = compiled_tokenizer(UpperCAmelCase )
lowercase_ = tf_tokenizer(UpperCAmelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def A__ ( self ) -> List[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowercase_ = ModelToSave(tokenizer=UpperCAmelCase )
lowercase_ = tf.convert_to_tensor([self.test_sentences[0]] )
lowercase_ = model.serving(UpperCAmelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowercase_ = Path(UpperCAmelCase ) / "saved.model"
tf.saved_model.save(UpperCAmelCase , UpperCAmelCase , signatures={"serving_default": model.serving} )
lowercase_ = tf.saved_model.load(UpperCAmelCase )
lowercase_ = loaded_model.signatures["serving_default"](UpperCAmelCase )["output_0"]
            # The loaded model is compiled, but the test still expects exact agreement with the eager outputs here
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowercase_ = tf.convert_to_tensor([self.test_sentences[0]] )
lowercase_ = tf_tokenizer(UpperCAmelCase ) # Build model with some sample inputs
lowercase_ = tf_tokenizer.get_config()
lowercase_ = TFGPTaTokenizer.from_config(UpperCAmelCase )
lowercase_ = model_from_config(UpperCAmelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def A__ ( self ) -> Dict:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            # set an arbitrary pad token id just so padded encoding can run
lowercase_ = 123123
for max_length in [3, 5, 1024]:
lowercase_ = tf.convert_to_tensor([self.test_sentences[0]] )
lowercase_ = tf_tokenizer(UpperCAmelCase , max_length=UpperCAmelCase )
lowercase_ = out["input_ids"].numpy().shape[1]
assert out_length == max_length
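
# --- Editor's note: a hedged sketch (assumes tensorflow is installed) of the
# SavedModel round trip tested above, reduced to a minimal module with a
# string-typed serving signature; the save/load calls are left commented so
# nothing is written to disk on import.
import tensorflow as tf

class _EchoModule(tf.Module):
    @tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
    def serving(self, text):
        return {"output_0": tf.strings.length(text)}

# module = _EchoModule()
# tf.saved_model.save(module, "/tmp/echo", signatures={"serving_default": module.serving})
# loaded = tf.saved_model.load("/tmp/echo")
# loaded.signatures["serving_default"](tf.constant(["hello"]))["output_0"]
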
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    '''simple docstring'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be non-negative")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
SCREAMING_SNAKE_CASE__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
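
# --- Editor's note: the expression above is the Friedmann equation
#   H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_L)
# Quick check of the demo call: at z = 0 with Omega_r = 1e-4, Omega_m = 0.3,
# Omega_L = 0.7, the curvature term works out to -1e-4 and the bracket sums
# to exactly 1.0, so the printed value is H0 itself, 68.3.
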
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable):
    '''simple docstring'''

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            F'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.',
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
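
# --- Editor's note: minimal usage of the decorator above; `_toy_api` is a
# hypothetical example function, not part of this module.
@experimental
def _toy_api():
    return 42

# _toy_api()  # emits UserWarning: '_toy_api' is experimental and might be subject to breaking changes...
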
import sys
def matrix_chain_order(array: list):
    '''simple docstring'''
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution: list, i: int, j: int):
    '''simple docstring'''
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
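
# --- Editor's note: for the demo dimensions [30, 35, 15, 5, 10, 20, 25] the
# dynamic program above fills an O(n^2) table in O(n^3) time; the minimum
# multiplication count it reports is matrix[1][n - 1] = 15125, and the printed
# parenthesization is (( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ) -- the classic
# CLRS example this file follows.
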
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=768 ) -> Optional[int]:
'''simple docstring'''
super().__init__(UpperCAmelCase )
lowercase_ = proj_size
lowercase_ = CLIPVisionModel(UpperCAmelCase )
lowercase_ = PaintByExampleMapper(UpperCAmelCase )
lowercase_ = nn.LayerNorm(config.hidden_size )
lowercase_ = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
lowercase_ = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
lowercase_ = self.model(pixel_values=UpperCAmelCase )
lowercase_ = clip_output.pooler_output
lowercase_ = self.mapper(latent_states[:, None] )
lowercase_ = self.final_layer_norm(UpperCAmelCase )
lowercase_ = self.proj_out(UpperCAmelCase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
super().__init__()
lowercase_ = (config.num_hidden_layers + 1) // 5
lowercase_ = config.hidden_size
lowercase_ = 1
lowercase_ = nn.ModuleList(
[
BasicTransformerBlock(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , activation_fn="gelu" , attention_bias=UpperCAmelCase )
for _ in range(UpperCAmelCase )
] )
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
for block in self.blocks:
lowercase_ = block(UpperCAmelCase )
return hidden_states
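
# --- Editor's note: a shape-level sketch of the mapper above (pseudo-shapes,
# not executable against real weights): the pooled CLIP embedding is lifted to
# a length-1 sequence, passed through the small transformer stack, layer-
# normed, and projected to `proj_size`.
#   pooled output:  (batch, hidden)
#   latent seq:     (batch, 1, hidden)      via latent_states[:, None]
#   projected:      (batch, 1, proj_size)
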
def equation(x: float) -> float:
    '''simple docstring'''
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    '''simple docstring'''
    # a sign change over [a, b] is required, otherwise no root is bracketed
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
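
# --- Editor's note: each iteration above halves the bracket, so the loop runs
# about log2((b - a) / 0.01) times; for [-2, 5] that is log2(700) ~ 10 passes
# before |b - a| < 0.01. The roots of 10 - x^2 are +/-sqrt(10) ~ +/-3.1623,
# which is what both demo calls approximate.
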
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: str=True , __lowerCamelCase: Union[str, Any]="pt" ):
'''simple docstring'''
lowercase_ = {"add_prefix_space": True} if isinstance(__lowerCamelCase , __lowerCamelCase ) and not line.startswith(" " ) else {}
lowercase_ = padding_side
return tokenizer(
[line] , max_length=__lowerCamelCase , padding="max_length" if pad_to_max_length else None , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase , add_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Dict=None , ):
'''simple docstring'''
lowercase_ = input_ids.ne(__lowerCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="train" , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="" , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowercase_ = Path(UpperCAmelCase ).joinpath(type_path + ".source" )
lowercase_ = Path(UpperCAmelCase ).joinpath(type_path + ".target" )
lowercase_ = self.get_char_lens(self.src_file )
lowercase_ = max_source_length
lowercase_ = max_target_length
assert min(self.src_lens ) > 0, F'found empty line in {self.src_file}'
lowercase_ = tokenizer
lowercase_ = prefix
if n_obs is not None:
lowercase_ = self.src_lens[:n_obs]
lowercase_ = src_lang
lowercase_ = tgt_lang
def __len__( self ) -> Dict:
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self , UpperCAmelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
lowercase_ = index + 1 # linecache starts at 1
lowercase_ = self.prefix + linecache.getline(str(self.src_file ) , UpperCAmelCase ).rstrip("\n" )
lowercase_ = linecache.getline(str(self.tgt_file ) , UpperCAmelCase ).rstrip("\n" )
assert source_line, F'empty source line for index {index}'
assert tgt_line, F'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer , UpperCAmelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowercase_ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCAmelCase ) else self.tokenizer
)
lowercase_ = self.tokenizer.generator if isinstance(self.tokenizer , UpperCAmelCase ) else self.tokenizer
lowercase_ = encode_line(UpperCAmelCase , UpperCAmelCase , self.max_source_length , "right" )
lowercase_ = encode_line(UpperCAmelCase , UpperCAmelCase , self.max_target_length , "right" )
lowercase_ = source_inputs["input_ids"].squeeze()
lowercase_ = target_inputs["input_ids"].squeeze()
lowercase_ = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def A__ ( UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return [len(UpperCAmelCase ) for x in Path(UpperCAmelCase ).open().readlines()]
def A__ ( self , UpperCAmelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
lowercase_ = torch.stack([x["input_ids"] for x in batch] )
lowercase_ = torch.stack([x["attention_mask"] for x in batch] )
lowercase_ = torch.stack([x["decoder_input_ids"] for x in batch] )
lowercase_ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , UpperCAmelCase )
else self.tokenizer.pad_token_id
)
lowercase_ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , UpperCAmelCase )
else self.tokenizer.pad_token_id
)
lowercase_ = trim_batch(UpperCAmelCase , UpperCAmelCase )
lowercase_ , lowercase_ = trim_batch(UpperCAmelCase , UpperCAmelCase , attention_mask=UpperCAmelCase )
lowercase_ = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
SCREAMING_SNAKE_CASE__ = getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(__lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: str ):
'''simple docstring'''
lowercase_ = get_git_info()
save_json(__lowerCamelCase , os.path.join(__lowerCamelCase , "git_log.json" ) )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Tuple , __lowerCamelCase: Optional[int]=4 , **__lowerCamelCase: Tuple ):
'''simple docstring'''
with open(__lowerCamelCase , "w" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase , indent=__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple ):
'''simple docstring'''
with open(__lowerCamelCase ) as f:
return json.load(__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = git.Repo(search_parent_directories=__lowerCamelCase )
lowercase_ = {
"repo_id": str(__lowerCamelCase ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Callable , __lowerCamelCase: Iterable ):
'''simple docstring'''
return list(map(__lowerCamelCase , __lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple , __lowerCamelCase: int ):
'''simple docstring'''
with open(__lowerCamelCase , "wb" ) as f:
return pickle.dump(__lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] ):
'''simple docstring'''
def remove_articles(__lowerCamelCase: Dict ):
return re.sub(r"\b(a|an|the)\b" , " " , __lowerCamelCase )
def white_space_fix(__lowerCamelCase: Optional[Any] ):
return " ".join(text.split() )
def remove_punc(__lowerCamelCase: Optional[Any] ):
lowercase_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCamelCase: Union[str, Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCamelCase ) ) ) )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: List[Any] ):
'''simple docstring'''
lowercase_ = normalize_answer(__lowerCamelCase ).split()
lowercase_ = normalize_answer(__lowerCamelCase ).split()
lowercase_ = Counter(__lowerCamelCase ) & Counter(__lowerCamelCase )
lowercase_ = sum(common.values() )
if num_same == 0:
return 0
lowercase_ = 1.0 * num_same / len(__lowerCamelCase )
lowercase_ = 1.0 * num_same / len(__lowerCamelCase )
lowercase_ = (2 * precision * recall) / (precision + recall)
return fa
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
return normalize_answer(__lowerCamelCase ) == normalize_answer(__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[str] , __lowerCamelCase: List[str] ):
'''simple docstring'''
assert len(__lowerCamelCase ) == len(__lowerCamelCase )
lowercase_ = 0
for hypo, pred in zip(__lowerCamelCase , __lowerCamelCase ):
em += exact_match_score(__lowerCamelCase , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
em /= len(__lowerCamelCase )
return {"em": em}
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Tuple ):
'''simple docstring'''
return model_prefix.startswith("rag" )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
lowercase_ = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
lowercase_ = "dropout_rate"
for p in extra_params:
if getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
if not hasattr(__lowerCamelCase , __lowerCamelCase ) and not hasattr(__lowerCamelCase , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(__lowerCamelCase ) )
delattr(__lowerCamelCase , __lowerCamelCase )
continue
lowercase_ = p if hasattr(__lowerCamelCase , __lowerCamelCase ) else equivalent_param[p]
setattr(__lowerCamelCase , __lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
delattr(__lowerCamelCase , __lowerCamelCase )
return hparams, config
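
# --- Editor's note: a self-contained check of the token-level F1 above
# (count overlapping tokens, then take the harmonic mean of precision and
# recall); `_toy_f1` is a hypothetical helper mirroring the logic.
from collections import Counter as _Counter

def _toy_f1(pred_tokens, gold_tokens):
    common = _Counter(pred_tokens) & _Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

assert _toy_f1(["a", "b"], ["a", "c"]) == 0.5
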
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any ):
'''simple docstring'''
with open(__lowerCamelCase , "r" ) as f:
lowercase_ = f.read().splitlines()
return [l.strip() for l in lines]
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="<eos>" , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = load_vocab_file(UpperCAmelCase )
lowercase_ = dict(enumerate(self.all_tokens ) )
lowercase_ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase_ = unk_token
lowercase_ = cls_token
lowercase_ = pad_token
lowercase_ = mask_token
lowercase_ = eos_token
lowercase_ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return text.split()
def A__ ( self , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
return len(self._id_to_token )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.cls_token_id]
lowercase_ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase_ = [1] + ([0] * len(UpperCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(UpperCAmelCase ) + [1]
return mask
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = os.path.join(UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(UpperCAmelCase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> int:
'''simple docstring'''
return super()._add_tokens(UpperCAmelCase , special_tokens=UpperCAmelCase )
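# Illustrative sketch (not part of the original file): the ESM vocabulary has
# no SEP token, so build_inputs_with_special_tokens above produces
# <cls> seq <eos> for a single sequence and <cls> seq_a <eos> seq_b <eos> for
# a pair. With hypothetical token ids cls=0 and eos=2:
_cls, _eos = [0], [2]
assert _cls + [5, 6, 7] + _eos == [0, 5, 6, 7, 2]
assert _cls + [5, 6] + _eos + [8, 9] + _eos == [0, 5, 6, 2, 8, 9, 2]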
| 297
| 1
|
from copy import deepcopy
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase = None , UpperCAmelCase = None ) -> None:
'''simple docstring'''
if arr is None and size is not None:
lowercase_ = size
lowercase_ = [0] * size
elif arr is not None:
self.init(UpperCAmelCase )
else:
raise ValueError("Either arr or size must be specified" )
def A__ ( self , UpperCAmelCase ) -> None:
'''simple docstring'''
lowercase_ = len(UpperCAmelCase )
lowercase_ = deepcopy(UpperCAmelCase )
for i in range(1 , self.size ):
lowercase_ = self.next_(UpperCAmelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def A__ ( self ) -> list[int]:
'''simple docstring'''
lowercase_ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
lowercase_ = self.next_(UpperCAmelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def A__ ( UpperCAmelCase ) -> int:
'''simple docstring'''
return index + (index & (-index))
@staticmethod
def A__ ( UpperCAmelCase ) -> int:
'''simple docstring'''
return index - (index & (-index))
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> None:
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
lowercase_ = self.next_(UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> None:
'''simple docstring'''
self.add(UpperCAmelCase , value - self.get(UpperCAmelCase ) )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
if right == 0:
return 0
lowercase_ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
lowercase_ = self.prev(UpperCAmelCase )
return result
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
return self.prefix(UpperCAmelCase ) - self.prefix(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self.query(UpperCAmelCase , index + 1 )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
value -= self.tree[0]
if value < 0:
return -1
lowercase_ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
lowercase_ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
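# Illustrative sketch (not part of the original file): the core Fenwick-tree
# index arithmetic used above. next_ adds the lowest set bit of the index and
# prev removes it, which is what makes point updates and prefix-sum queries
# O(log n).
def _next(index: int) -> int:
    return index + (index & (-index))

def _prev(index: int) -> int:
    return index - (index & (-index))

assert _next(6) == 8 and _prev(6) == 4  # 6 = 0b110, lowest set bit is 2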
| 297
|
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
    Example 2-The same as Example 1, but also returning the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> int:
'''simple docstring'''
if return_pvalue:
lowercase_ = pearsonr(UpperCAmelCase , UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCAmelCase , UpperCAmelCase )[0] )}
| 297
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = (16, 32, 96, 2_56)
lowerCAmelCase__ = jnp.floataa
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase_ = []
for i in range(len(self.block_out_channels ) - 1 ):
lowercase_ = self.block_out_channels[i]
lowercase_ = self.block_out_channels[i + 1]
lowercase_ = nn.Conv(
UpperCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCAmelCase )
lowercase_ = nn.Conv(
UpperCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCAmelCase )
lowercase_ = blocks
lowercase_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = self.conv_in(UpperCAmelCase )
lowercase_ = nn.silu(UpperCAmelCase )
for block in self.blocks:
lowercase_ = block(UpperCAmelCase )
lowercase_ = nn.silu(UpperCAmelCase )
lowercase_ = self.conv_out(UpperCAmelCase )
return embedding
@flax_register_to_config
class __lowerCamelCase ( nn.Module , snake_case_ , snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 32
lowerCAmelCase__ = 4
lowerCAmelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCAmelCase__ = False
lowerCAmelCase__ = (3_20, 6_40, 12_80, 12_80)
lowerCAmelCase__ = 2
lowerCAmelCase__ = 8
lowerCAmelCase__ = None
lowerCAmelCase__ = 12_80
lowerCAmelCase__ = 0.0
lowerCAmelCase__ = False
lowerCAmelCase__ = jnp.floataa
lowerCAmelCase__ = True
lowerCAmelCase__ = 0
lowerCAmelCase__ = "rgb"
lowerCAmelCase__ = (16, 32, 96, 2_56)
def A__ ( self , UpperCAmelCase ) -> FrozenDict:
'''simple docstring'''
lowercase_ = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase_ = jnp.zeros(UpperCAmelCase , dtype=jnp.floataa )
lowercase_ = jnp.ones((1,) , dtype=jnp.intaa )
lowercase_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase_ = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase_ = jnp.zeros(UpperCAmelCase , dtype=jnp.floataa )
lowercase_ , lowercase_ = jax.random.split(UpperCAmelCase )
lowercase_ = {"params": params_rng, "dropout": dropout_rng}
return self.init(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )["params"]
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = self.block_out_channels
lowercase_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase_ = self.num_attention_heads or self.attention_head_dim
# input
lowercase_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase_ = FlaxTimestepEmbedding(UpperCAmelCase , dtype=self.dtype )
lowercase_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase_ = self.only_cross_attention
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase_ = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase_ = []
lowercase_ = []
lowercase_ = block_out_channels[0]
lowercase_ = nn.Conv(
UpperCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCAmelCase )
for i, down_block_type in enumerate(self.down_block_types ):
lowercase_ = output_channel
lowercase_ = block_out_channels[i]
lowercase_ = i == len(UpperCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase_ = FlaxCrossAttnDownBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase_ = FlaxDownBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCAmelCase )
for _ in range(self.layers_per_block ):
lowercase_ = nn.Conv(
UpperCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCAmelCase )
if not is_final_block:
lowercase_ = nn.Conv(
UpperCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCAmelCase )
lowercase_ = down_blocks
lowercase_ = controlnet_down_blocks
# mid
lowercase_ = block_out_channels[-1]
lowercase_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=UpperCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase_ = nn.Conv(
UpperCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1.0 , UpperCAmelCase = True , UpperCAmelCase = False , ) -> Union[FlaxControlNetOutput, Tuple]:
'''simple docstring'''
lowercase_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowercase_ = jnp.flip(UpperCAmelCase , axis=1 )
# 1. time
if not isinstance(UpperCAmelCase , jnp.ndarray ):
lowercase_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowercase_ = timesteps.astype(dtype=jnp.floataa )
lowercase_ = jnp.expand_dims(UpperCAmelCase , 0 )
lowercase_ = self.time_proj(UpperCAmelCase )
lowercase_ = self.time_embedding(UpperCAmelCase )
# 2. pre-process
lowercase_ = jnp.transpose(UpperCAmelCase , (0, 2, 3, 1) )
lowercase_ = self.conv_in(UpperCAmelCase )
lowercase_ = jnp.transpose(UpperCAmelCase , (0, 2, 3, 1) )
lowercase_ = self.controlnet_cond_embedding(UpperCAmelCase )
sample += controlnet_cond
# 3. down
lowercase_ = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase_ , lowercase_ = down_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train )
else:
lowercase_ , lowercase_ = down_block(UpperCAmelCase , UpperCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowercase_ = self.mid_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train )
        # 5. controlnet blocks
lowercase_ = ()
for down_block_res_sample, controlnet_block in zip(UpperCAmelCase , self.controlnet_down_blocks ):
lowercase_ = controlnet_block(UpperCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowercase_ = controlnet_down_block_res_samples
lowercase_ = self.controlnet_mid_block(UpperCAmelCase )
# 6. scaling
lowercase_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=UpperCAmelCase , mid_block_res_sample=UpperCAmelCase )
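# Illustrative sketch (not part of the original file): __call__ above receives
# a PyTorch-style NCHW sample and transposes it to the NHWC layout that
# flax.linen.Conv expects before running the blocks.
import jax.numpy as _jnp
_sample_nchw = _jnp.zeros((1, 4, 64, 64))                  # (batch, channels, H, W)
_sample_nhwc = _jnp.transpose(_sample_nchw, (0, 2, 3, 1))  # (batch, H, W, channels)
assert _sample_nhwc.shape == (1, 64, 64, 4)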
| 297
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(UpperCAmelCase )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(UpperCAmelCase ):
self.assertDictEqual(UpperCAmelCase , example_records[i] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
lowercase_ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def A__ ( self ) -> Any: # checks what happens with missing columns
'''simple docstring'''
lowercase_ = [{"col_1": 1}, {"col_2": "x"}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def A__ ( self ) -> List[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
lowercase_ = [{"col_1": []}, {"col_1": [1, 2]}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = Dataset.from_list([] )
self.assertEqual(len(UpperCAmelCase ) , 0 )
self.assertListEqual(dset.column_names , [] )
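# Illustrative sketch (not part of the original file): Dataset.from_list infers
# the schema from the records, and as the missing-column test above relies on,
# the first record fixes the column set.
from datasets import Dataset as _Dataset
_ds = _Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
assert _ds.column_names == ["col_1", "col_2"] and len(_ds) == 2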
| 297
| 1
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
@dataclass(frozen=snake_case_ )
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
@dataclass(frozen=snake_case_ )
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 42
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase=False , UpperCAmelCase = False , ) -> str:
'''simple docstring'''
lowercase_ = hans_processors[task]()
lowercase_ = os.path.join(
UpperCAmelCase , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(UpperCAmelCase ) , UpperCAmelCase , ) , )
lowercase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowercase_ , lowercase_ = label_list[2], label_list[1]
lowercase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase_ = cached_features_file + ".lock"
with FileLock(UpperCAmelCase ):
if os.path.exists(UpperCAmelCase ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
lowercase_ = torch.load(UpperCAmelCase )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
lowercase_ = (
processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
)
logger.info("Training examples: %s" , len(UpperCAmelCase ) )
lowercase_ = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
logger.info("Saving features into cached file %s" , UpperCAmelCase )
torch.save(self.features , UpperCAmelCase )
def __len__( self ) -> Dict:
'''simple docstring'''
return len(self.features )
def __getitem__( self , UpperCAmelCase ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def A__ ( self ) -> Tuple:
'''simple docstring'''
return self.label_list
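# Illustrative sketch (not part of the original file): the FileLock guard above
# makes feature caching safe under distributed training -- the first process
# builds the cache while the others block on the lock and then read it.
from filelock import FileLock as _FileLock
with _FileLock("cached_features.lock"):  # hypothetical lock path
    pass  # load-or-build the cached features inside the critical section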
if is_tf_available():
import tensorflow as tf
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 128 , UpperCAmelCase=False , UpperCAmelCase = False , ) -> List[str]:
'''simple docstring'''
lowercase_ = hans_processors[task]()
lowercase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowercase_ , lowercase_ = label_list[2], label_list[1]
lowercase_ = label_list
lowercase_ = processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
lowercase_ = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(UpperCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowercase_ = tf.data.Dataset.from_generator(
UpperCAmelCase , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.dataset
def __len__( self ) -> Any:
'''simple docstring'''
return len(self.features )
def __getitem__( self , UpperCAmelCase ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def A__ ( self ) -> List[str]:
'''simple docstring'''
return self.label_list
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , "heuristics_train_set.txt" ) ) , "train" )
def A__ ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , "heuristics_evaluation_set.txt" ) ) , "dev" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = []
for i, line in enumerate(UpperCAmelCase ):
if i == 0:
continue
lowercase_ = "%s-%s" % (set_type, line[0])
lowercase_ = line[5]
lowercase_ = line[6]
lowercase_ = line[7][2:] if line[7].startswith("ex" ) else line[7]
lowercase_ = line[0]
examples.append(InputExample(guid=UpperCAmelCase , text_a=UpperCAmelCase , text_b=UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) )
return examples
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[InputExample] , __lowerCamelCase: List[str] , __lowerCamelCase: int , __lowerCamelCase: PreTrainedTokenizer , ):
'''simple docstring'''
lowercase_ = {label: i for i, label in enumerate(__lowerCamelCase )}
lowercase_ = []
for ex_index, example in tqdm.tqdm(enumerate(__lowerCamelCase ) , desc="convert examples to features" ):
if ex_index % 1_0000 == 0:
logger.info("Writing example %d" % (ex_index) )
lowercase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=__lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , truncation=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , )
lowercase_ = label_map[example.label] if example.label in label_map else 0
lowercase_ = int(example.pairID )
features.append(InputFeatures(**__lowerCamelCase , label=__lowerCamelCase , pairID=__lowerCamelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
SCREAMING_SNAKE_CASE__ = {
"""hans""": 3,
}
SCREAMING_SNAKE_CASE__ = {
"""hans""": HansProcessor,
}
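# Illustrative sketch (not part of the original file): the "HACK" branch above
# swaps label indices 1 and 2 for RoBERTa/BART-family tokenizers so the label
# order matches what those pretrained checkpoints expect.
_labels = ["contradiction", "entailment", "neutral"]
_labels[2], _labels[1] = _labels[1], _labels[2]
assert _labels == ["contradiction", "neutral", "entailment"]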
| 297
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
lowercase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase_ = DDPMScheduler()
lowercase_ = AudioDiffusionPipeline(vqvae=UpperCAmelCase , unet=self.dummy_unet , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 , return_dict=UpperCAmelCase )
lowercase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase_ = DDIMScheduler()
lowercase_ = self.dummy_vqvae_and_unet
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(raw_audio=UpperCAmelCase , generator=UpperCAmelCase , start_step=5 , steps=10 )
lowercase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase_ = self.dummy_unet_condition
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = torch.rand((1, 1, 10) )
lowercase_ = pipe(generator=UpperCAmelCase , encoding=UpperCAmelCase )
lowercase_ = output.images[0]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device
lowercase_ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
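# Illustrative usage sketch (not part of the original file), mirroring the slow
# test above with the same public checkpoint (downloads weights on first run):
from diffusers import DiffusionPipeline as _DiffusionPipeline
_pipe = _DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
_output = _pipe(generator=torch.Generator().manual_seed(42))
_audio, _image = _output.audios[0], _output.images[0]  # waveform + its mel image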
| 297
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self , UpperCAmelCase = 1 , UpperCAmelCase = 100 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if audio_length_in_s is None:
lowercase_ = self.unet.config.sample_size / self.unet.config.sample_rate
lowercase_ = audio_length_in_s * self.unet.config.sample_rate
lowercase_ = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
F' {3 * down_scale_factor / self.unet.config.sample_rate}.' )
lowercase_ = int(UpperCAmelCase )
if sample_size % down_scale_factor != 0:
lowercase_ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
F' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
" process." )
lowercase_ = int(UpperCAmelCase )
lowercase_ = next(iter(self.unet.parameters() ) ).dtype
lowercase_ = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(UpperCAmelCase )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
lowercase_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase , device=audio.device )
lowercase_ = self.scheduler.timesteps.to(UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
            # 2. compute previous image: x_t -> x_t-1
lowercase_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
lowercase_ = audio.clamp(-1 , 1 ).float().cpu().numpy()
lowercase_ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=UpperCAmelCase )
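# Illustrative sketch (not part of the original file): the rounding above pads
# the requested sample count up to a multiple of 2**len(up_blocks) so every
# down/upsampling stage divides evenly. With hypothetical numbers:
_sample_rate, _audio_length_in_s = 16_000, 1.0003
_down_scale_factor = 2 ** 3
_sample_size = int(_audio_length_in_s * _sample_rate)  # 16004, not divisible by 8
if _sample_size % _down_scale_factor != 0:
    _sample_size = (_sample_size // _down_scale_factor + 1) * _down_scale_factor
assert _sample_size == 16008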
| 297
|
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
SCREAMING_SNAKE_CASE__ = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 297
| 1
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
SCREAMING_SNAKE_CASE__ = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
SCREAMING_SNAKE_CASE__ = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
SCREAMING_SNAKE_CASE__ = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(6_4, 6_4)
)
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.img_to_array(test_image)
SCREAMING_SNAKE_CASE__ = np.expand_dims(test_image, axis=0)
SCREAMING_SNAKE_CASE__ = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
SCREAMING_SNAKE_CASE__ = """Normal"""
if result[0][0] == 1:
SCREAMING_SNAKE_CASE__ = """Abnormality detected"""
| 297
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[32, 64, 128] , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2"] , UpperCAmelCase=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = embed_dim
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
lowercase_ = mlp_ratio
lowercase_ = qkv_bias
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = drop_path_rate
lowercase_ = hidden_act
lowercase_ = use_absolute_embeddings
lowercase_ = patch_norm
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = is_training
lowercase_ = scope
lowercase_ = use_labels
lowercase_ = type_sequence_label_size
lowercase_ = encoder_stride
lowercase_ = out_features
lowercase_ = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = FocalNetModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
lowercase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = FocalNetForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 , has_text_modality=UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.hidden_states
lowercase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# FocalNet has a different seq_length
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = reshaped_hidden_states[0].shape
lowercase_ = (
reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = FocalNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
# verify the logits
lowercase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = FocalNetConfig
lowerCAmelCase__ = False
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
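# Illustrative usage sketch (not part of the original file), mirroring the
# integration test above with the same public checkpoint and fixture image:
from PIL import Image as _Image
from transformers import AutoImageProcessor as _AutoImageProcessor
from transformers import FocalNetForImageClassification as _FocalNetForImageClassification
_processor = _AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
_model = _FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
_inputs = _processor(images=_Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), return_tensors="pt")
_logits = _model(**_inputs).logits  # shape (1, 1000): ImageNet-1k classes
assert _logits.argmax(-1).item() == 281  # tabby cat, per the test above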
| 297
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModel.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertModel )
            model = AutoModel.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertModel )
@slow
def A__ ( self ) -> Any:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForPreTraining.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForPreTraining )
            model = AutoModelForPreTraining.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForPreTraining )
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , GPTaConfig )
            model = TFAutoModelForCausalLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFGPTaLMHeadModel )
            model = AutoModelForCausalLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , GPTaLMHeadModel )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelWithLMHead.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )
@slow
def A__ ( self ) -> int:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelForMaskedLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )
@slow
def A__ ( self ) -> int:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TaConfig )
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTaForConditionalGeneration )
            model = AutoModelForSeqaSeqLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TaForConditionalGeneration )
@slow
def A__ ( self ) -> str:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForSequenceClassification )
            model = AutoModelForSequenceClassification.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForSequenceClassification )
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForQuestionAnswering )
            model = AutoModelForQuestionAnswering.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForQuestionAnswering )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
def A__ ( self ) -> str:
'''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class __lowerCamelCase ( PreTrainedTokenizerFast ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = GPTaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop("add_bos_token" , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
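# Usage sketch for the fast tokenizer class above (illustrative; the class is
# normally instantiated through `from_pretrained` with one of the checkpoints
# listed in PRETRAINED_VOCAB_FILES_MAP, e.g. "gpt2"):
#   tokenizer = <tokenizer class>.from_pretrained("gpt2")
#   tokenizer("hello world")["input_ids"]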
import baseaa
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: str ):
'''simple docstring'''
    return baseaa.aaaencode(__lowerCamelCase.encode("utf-8" ) )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: bytes ):
'''simple docstring'''
return baseaa.aaadecode(__lowerCamelCase ).decode("utf-8" )
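# Round-trip sketch (illustrative; the two helpers appear to wrap Ascii85
# encode/decode, cf. `base64.a85encode`/`base64.a85decode` in the standard
# library): encoding a str yields bytes, and decoding those bytes restores it.
#   encoded = <encode helper>("some text")   # -> bytes
#   assert <decode helper>(encoded) == "some text"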
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params , i , prefix ):
    '''simple docstring'''
    return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
    '''simple docstring'''
    k_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
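def _attention_lookup_shape_demo():
    # Illustrative sanity check (hypothetical sizes): a T5X attention kernel is
    # stored as (d_model, num_layers, n_heads, d_head); slicing one layer and
    # flattening the head axes gives (d_model, n_heads * d_head), which is what
    # the lookup above returns before the caller transposes it.
    kernel = np.zeros((8, 2, 4, 2) )
    k_tmp = np.ascontiguousarray(kernel[:, 0, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    assert k.shape == (8, 8)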
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    '''simple docstring'''
    if split_mlp_wi:
        wi_a = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_b = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
    '''simple docstring'''
    return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch( variables: dict , *, num_layers: int , is_encoder_only: bool , scalable_attention: bool = False ):
    '''simple docstring'''
    old = traverse_util.flatten_dict(variables["target"] )
    old = {"/".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_attention_layer_norm" )
        k , o , q , v = tax_attention_lookup(old , i , "encoder" , "attention" )
        new[F'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[F'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_mlp_layer_norm" )
        wi , wo = tax_mlp_lookup(old , i , "encoder" , split_mlp_wi )
        new[F'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[F'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F'encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
                old , i , "encoder" ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "encoder" ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "decoder" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_self_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "self_attention" )
            new[F'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[F'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_cross_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "encoder_decoder_attention" )
            new[F'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[F'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_mlp_layer_norm" )
            wi , wo = tax_mlp_lookup(old , i , "decoder" , split_mlp_wi )
            new[F'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[F'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F'decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(old , i , "decoder" ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict( converted_params , is_encoder_only: bool ):
    '''simple docstring'''
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    '''simple docstring'''
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only: bool = False , scalable_attention: bool = False , ):
    '''simple docstring'''
    config = MTaConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCamelCase :
"""simple docstring"""
    model_type = field(
        default=None , metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES )} )
    data_dir = field(
        default=None , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    max_seq_length = field(
        default=1_28 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    doc_stride = field(
        default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    max_query_length = field(
        default=64 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    max_answer_length = field(
        default=30 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    overwrite_cache = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    version_2_with_negative = field(
        default=False , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    null_score_diff_threshold = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    n_best_size = field(
        default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    lang_id = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    threads = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class Split( Enum ):
    """simple docstring"""
    train = "train"
    dev = "dev"
class __lowerCamelCase ( Dataset ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ) -> List[str]:
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        self.mode = mode
# Load data features from cache or dataset file
lowercase_ = "v2" if args.version_2_with_negative else "v1"
lowercase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase_ = cached_features_file + ".lock"
with FileLock(UpperCAmelCase ):
if os.path.exists(UpperCAmelCase ) and not args.overwrite_cache:
lowercase_ = time.time()
lowercase_ = torch.load(UpperCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowercase_ = self.old_features["features"]
lowercase_ = self.old_features.get("dataset" , UpperCAmelCase )
lowercase_ = self.old_features.get("examples" , UpperCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowercase_ = self.processor.get_dev_examples(args.data_dir )
else:
lowercase_ = self.processor.get_train_examples(args.data_dir )
lowercase_ , lowercase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=UpperCAmelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=UpperCAmelCase , )
lowercase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , UpperCAmelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Dict:
'''simple docstring'''
return len(self.features )
    def __getitem__( self , i ) -> Dict[str, torch.Tensor]:
        '''simple docstring'''
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible} )
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
        return inputs
return inputs
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
'''simple docstring'''
    if not isinstance(__lowerCamelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
    if __lowerCamelCase < 0:
        raise ValueError("Input value must be a positive integer" )
return bin(__lowerCamelCase ).count("1" )
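def _popcount_demo():
    # Quick sanity check for the helper above: 11 == 0b1011 has three set
    # bits, and 0 has none.
    assert SCREAMING_SNAKE_CASE_(11 ) == 3
    assert SCREAMING_SNAKE_CASE_(0 ) == 0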
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def squared_euclidean_distance( a , b ):
    '''simple docstring'''
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
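def _squared_euclidean_distance_demo():
    # Sanity sketch (illustrative values): d[i, j] equals ||a_i - b_j||^2,
    # with `b` given row-wise as in color_quantize below.
    a = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 2.0]] )
    b = np.array([[1.0, 0.0, 0.0]] )
    assert np.allclose(squared_euclidean_distance(a , b ) , [[1.0], [8.0]] )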
def color_quantize( x , clusters ):
    '''simple docstring'''
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
class __lowerCamelCase ( BaseImageProcessor ):
"""simple docstring"""
lowerCAmelCase__ = ["pixel_values"]
    def __init__( self , clusters = None , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_normalize = True , do_color_quantize = True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )
    def normalize( self , image , data_format = None , ) -> np.ndarray:
        '''simple docstring'''
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_normalize = None , do_color_quantize = None , clusters = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput( BaseOutput ):
    """simple docstring"""
    sample: torch.FloatTensor
class __lowerCamelCase ( ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , out_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , activation_fn = "geglu" , norm_elementwise_affine = True , double_self_attention = True , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers )
            ] )
        self.proj_out = nn.Linear(inner_dim , in_channels )
    def forward( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict = True , ) -> Optional[Any]:
        '''simple docstring'''
        batch_frames , channel , height , width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , num_frames , channel )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
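# Shape flow of the forward method above (illustrative): the input arrives
# flattened as (batch * frames, channel, height, width); it is regrouped per
# clip, attention then runs over the frame axis for every spatial location,
# and the result is reshaped back before the residual connection.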
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType( Protocol ):
    """simple docstring"""
    def process( self , sample: float ) -> float:
        '''simple docstring'''
        return 0.0
def get_bounds( fft_results: np.ndarray , samplerate: int ):
    '''simple docstring'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
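def _get_bounds_demo():
    # Sanity sketch: a flat 0 dB spectrum is clamped to the (-20, 20) window.
    assert get_bounds(np.zeros(512 ) , 512 ) == (-20, 20)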
def show_frequency_response( filter_type: FilterType , samplerate: int ):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("Gain (dB)" )
    plt.plot(fft_db )
    plt.show()
def show_phase_response( filter_type: FilterType , samplerate: int ):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("Phase shift (Radians)" )
    plt.plot(np.unwrap(phase , -2 * pi ) )
    plt.show()
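def _all_pass_demo():
    # Minimal usage sketch (illustrative; opens a matplotlib window): an
    # identity "filter" satisfying the FilterType protocol above plots as a
    # flat 0 dB magnitude response.
    class AllPass:
        def process(self , sample: float ) -> float:
            return sample
    show_frequency_response(AllPass() , 48000 )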
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func: str , a: float | Decimal , precision: float = 10**-10 ):
    '''simple docstring'''
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
    # Find the value of e (root of log(x) - 1 = 0)
print(f"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class __lowerCamelCase ( PreTrainedTokenizerFast ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = MvpTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> Optional[int]:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
    def mask_token( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self , value ) -> int:
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> Tuple:
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="relu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ) -> str:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def get_config( self ) -> List[Any]:
'''simple docstring'''
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common( self ) -> Optional[int]:
        '''simple docstring'''
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs( self , config , inputs_dict ) -> Optional[Any]:
        '''simple docstring'''
        model = MaMaaaModel(config=config ).get_decoder().to(torch_device ).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )["last_hidden_state"]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-2 ) )
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ) -> Union[str, Any]:
        '''simple docstring'''
        model = MaMaaaModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname ).to(torch_device )
        encoder_last_hidden_state_a = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a = decoder(
            input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=encoder_last_hidden_state , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __lowerCamelCase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
lowerCAmelCase__ = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp( self ) -> str:
        '''simple docstring'''
        self.model_tester = MaMaaaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaMaaaConfig )
def A__ ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> List[str]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model_a , info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info["missing_keys"] , [] )
def A__ ( self ) -> str:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
def A__ ( self ) -> int:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class ) )
            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids" , input_ids )
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids" , None )
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids )
            else:
                inputs["inputs_embeds"] = wte(input_ids )
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids )
            with torch.no_grad():
                model(**inputs )[0]
def A__ ( self ) -> str:
'''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        model = MaMaaaForConditionalGeneration(config ).eval().to(torch_device )
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids , attention_mask=attention_mask )
        model.generate(num_beams=4 , do_sample=True , early_stopping=False , num_return_sequences=3 )
def _long_tensor( tok_lst ):
    '''simple docstring'''
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def A__ ( self ) -> Dict:
'''simple docstring'''
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(torch_device )
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            output = model(**inputs_dict )[0]
        expected_shape = torch.Size((1, 11, 1024) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(UpperCAmelCase )
# change to intended input
lowercase_ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
lowercase_ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
lowercase_ = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase , UpperCAmelCase )
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )[0]
lowercase_ = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase )
# change to expected output here
lowercase_ = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(UpperCAmelCase )
lowercase_ = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
lowercase_ = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowercase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors="pt" )
lowercase_ = model.generate(
input_ids=dct["input_ids"].to(UpperCAmelCase ) , attention_mask=dct["attention_mask"].to(UpperCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
lowercase_ = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
lowercase_ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
assert generated == expected_en
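# Minimal generation sketch mirroring the slow test above; class names follow
# this file's imports and the checkpoint is the public facebook/m2m100_418M:
#   tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
#   model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   batch = tokenizer(["La vie est belle."], return_tensors="pt")
#   ids = model.generate(**batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"))
#   print(tokenizer.batch_decode(ids, skip_special_tokens=True))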
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )
        torch.manual_seed(0)
        # boolean flags below (upcast_attention, use_linear_projection,
        # set_alpha_to_one) are restored per the upstream stable-unclip test
        # and should be treated as assumptions
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
@skip_mps
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableUnCLIPImgaImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
inputs.update({"image_embeds": None} )
lowercase_ = sd_pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = pipe(
UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
lowercase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
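# Sketch of the memory-saving inference pattern the slow tests above rely on
# (checkpoint name taken from the tests; fp16 as in the reference outputs):
#   pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
#       "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
#   )
#   pipe.enable_attention_slicing()
#   pipe.enable_sequential_cpu_offload()
#   out = pipe(init_image, "a prompt", num_inference_steps=20, output_type="np")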
from __future__ import annotations
def is_9_pandigital(candidate: int) -> bool:
    """Return True if the number uses each of the digits 1-9 exactly once."""
    digits = str(candidate)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """
    Find the largest 1-9 pandigital number that can be formed by concatenating
    a base number with its multiples.
    """
    # 4-digit base n: concat(n, 2 * n) == n * 10**5 + 2 * n == 100002 * n
    for base_num in range(9999, 4999, -1):
        candidate = 10_0002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # 3-digit base n: concat(n, 2 * n, 3 * n) == 1002003 * n
    for base_num in range(333, 99, -1):
        candidate = 100_2003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
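
# Worked example: 9327 concatenated with 2 * 9327 = 18654 gives 932718654,
# which uses each digit 1-9 exactly once.
assert is_9_pandigital(10_0002 * 9327)
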
if __name__ == "__main__":
print(f"""{solution() = }""")
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the affinely transformed distribution."""
        return self.variance.sqrt()
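# Example for the wrapper above: scaling a unit Normal by 2 and shifting by 3
# yields mean 3.0 and variance 4.0:
#   base = Normal(torch.zeros(1), torch.ones(1))
#   at = AffineTransformed(base, loc=3.0, scale=2.0)
#   at.mean -> tensor([3.]);  at.variance -> tensor([4.])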
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # smooth map onto the positive reals; behaves like relu for large |x|
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
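# Quick numeric check of squareplus (illustrative, not part of the upstream
# module): squareplus(0) == 1.0, and the output stays strictly positive for
# negative inputs, e.g. squareplus(-3) == (-3 + sqrt(13)) / 2 ~= 0.3028.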
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
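# Why adding log(scale) to the logits scales the count distribution (sketch):
# torch's NegativeBinomial has mean total_count * exp(logits), so
# logits + scale.log() multiplies the mean by `scale` while keeping integer
# support, which an AffineTransformed wrapper could not guarantee.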
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = MvpTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = "post_processor"
lowercase_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["sep"] )
if "cls" in state:
lowercase_ = tuple(state["cls"] )
lowercase_ = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(UpperCAmelCase , state.pop("type" ) )
lowercase_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def A__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
lowercase_ = value
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        # pairs are encoded as <s> A </s></s> B </s>
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
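# Illustrative usage (assuming the class above is exported under its upstream
# name MvpTokenizerFast): single sequences are wrapped as <s> ... </s> and
# pairs as <s> A </s></s> B </s>, matching build_inputs_with_special_tokens.
#   tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   enc = tok("Summarize the following article.", return_tensors="pt")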
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Verify the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1]
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2]
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1]
    )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1]
    )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
    assert issubclass(w[-1].category, UserWarning)
    assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
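# The script above expects exactly two processes; a typical launch (the file
# name is illustrative) would be:
#   accelerate launch --num_processes 2 test_even_batches.py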
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
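# Example of what the maps above accomplish: the HF key
# "down_blocks.0.resnets.0.norm1.weight" is first renamed by the resnet map
# (norm1 -> in_layers.0) and then by the layer map, ending up as the SD key
# "input_blocks.1.0.in_layers.0.weight".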
def convert_unet_state_dict(unet_state_dict):
    """Map HF Diffusers UNet keys to the original stable-diffusion layout."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    """Diffusers stores 1x1 attention convs as Linear weights; SD wants conv shape."""
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Map HF Diffusers VAE keys to the original stable-diffusion layout."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
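# The mid-block attention projections are Linear weights on the diffusers side
# but 1x1 conv weights in SD checkpoints, hence reshape_weight_for_sd above:
# a (512, 512) matrix becomes a (512, 512, 1, 1) kernel.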
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    # v1 text encoders need no key remapping
    return text_enc_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
SCREAMING_SNAKE_CASE__ = load_file(unet_path, device="""cpu""")
else:
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
SCREAMING_SNAKE_CASE__ = torch.load(unet_path, map_location="""cpu""")
if osp.exists(vae_path):
SCREAMING_SNAKE_CASE__ = load_file(vae_path, device="""cpu""")
else:
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
SCREAMING_SNAKE_CASE__ = torch.load(vae_path, map_location="""cpu""")
if osp.exists(text_enc_path):
SCREAMING_SNAKE_CASE__ = load_file(text_enc_path, device="""cpu""")
else:
SCREAMING_SNAKE_CASE__ = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
SCREAMING_SNAKE_CASE__ = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
SCREAMING_SNAKE_CASE__ = convert_unet_state_dict(unet_state_dict)
SCREAMING_SNAKE_CASE__ = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
SCREAMING_SNAKE_CASE__ = convert_vae_state_dict(vae_state_dict)
SCREAMING_SNAKE_CASE__ = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
SCREAMING_SNAKE_CASE__ = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
SCREAMING_SNAKE_CASE__ = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
SCREAMING_SNAKE_CASE__ = convert_text_enc_state_dict_vaa(text_enc_dict)
SCREAMING_SNAKE_CASE__ = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
SCREAMING_SNAKE_CASE__ = convert_text_enc_state_dict(text_enc_dict)
SCREAMING_SNAKE_CASE__ = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
SCREAMING_SNAKE_CASE__ = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
SCREAMING_SNAKE_CASE__ = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
SCREAMING_SNAKE_CASE__ = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
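# Typical invocation (paths are placeholders; the script is usually saved as
# convert_diffusers_to_original_stable_diffusion.py):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors --half --use_safetensors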
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 1
lowercase_ = 3
lowercase_ = (32, 32)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
return image
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase )
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
def extract(*UpperCAmelCase , **UpperCAmelCase ):
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = torch.ones([0] )
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
self.pixel_values.to(UpperCAmelCase )
return self
return Out()
return extract
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
lowercase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
# put models in fp16
lowercase_ = unet.half()
lowercase_ = vae.half()
lowercase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ = init_image.resize((760, 504) )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
lowercase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase_ = init_image.resize((768, 512) )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
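# Note on the strength knob exercised above (a sketch of diffusers' img2img
# scheduling): with strength=0.75 and num_inference_steps=50 the init image is
# noised to roughly timestep 0.75 * T and then denoised for about
# int(50 * 0.75) = 37 steps, so coarse structure of the input survives.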
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Virtual filesystem interface to the files of a Hugging Face Hub dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
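# Usage sketch (repo_info would come from HfApi.dataset_info; open() is
# provided by the fsspec AbstractFileSystem base class and delegates to _open):
#   fs = HfFileSystem(repo_info=dataset_info, token=token)
#   fs.ls("")  # top-level files and directories of the repository
#   with fs.open("data/train.csv") as f:
#       ...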
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=23 , UpperCAmelCase=11 , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = act_dim
lowercase_ = state_dim
lowercase_ = hidden_size
lowercase_ = max_length
lowercase_ = is_training
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowercase_ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DecisionTransformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
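# Input shape summary for the tester above (batch B, sequence L):
#   states (B, L, state_dim), actions (B, L, act_dim),
#   rewards and returns_to_go (B, L, 1), timesteps (B, L) as long tensors,
#   attention_mask (B, L). The model interleaves returns, states and actions
#   per step, which is why last_hidden_state has sequence length 3 * L.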
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False
    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp( self ) -> Dict:
        '''simple docstring'''
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )
    def test_config( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model( self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> Tuple:
        '''simple docstring'''
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_forward_signature( self ) -> Any:
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class DecisionTransformerModelIntegrationTest ( unittest.TestCase ):
    """simple docstring"""
@slow
    def test_autoregressive_prediction( self ) -> Union[str, Any]:
        '''simple docstring'''
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 )  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
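        # Autoregressive rollout: each step appends zero placeholders for the next
        # action/reward, queries the model, and checks the predicted action before
        # appending the (mocked) environment transition for the next iteration.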
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                _ , action_pred , _ = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
            state , reward , _ , _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
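# A hedged usage sketch (the filename `tracking_example.py` below is
# illustrative, not fixed by this script):
#
#   accelerate config                                   # one-time interactive setup
#   accelerate launch tracking_example.py --with_tracking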
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    '''simple docstring'''
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    metric = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split("." )[0]
        accelerator.init_trackers(run , config )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' , eval_metric )
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader ),
                    "epoch": epoch,
                } , step=epoch , )
    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    parser.add_argument(
        "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir" , type=str , default="logs" , help="Location on where to store experiment tracking logs and relevant project information" , )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    # Register this module lazily so the heavy torch imports above only run on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class OpenAIGPTConfig ( PretrainedConfig ):
"""simple docstring"""
lowerCAmelCase__ = "openai-gpt"
lowerCAmelCase__ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=40478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ) -> int:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
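
# A hedged usage sketch (standard transformers config pattern; `n_layer` is one
# of the real __init__ arguments above):
#   config = OpenAIGPTConfig(n_layer=6)
#   config.hidden_size  # -> 768, resolved through attribute_map to n_embd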
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
lowerCAmelCase__ = 1
@register_to_config
    def __init__( self , num_train_timesteps = 1000 , trained_betas = None ) -> List[Any]:
        '''simple docstring'''
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self , num_inference_steps , device = None ) -> Optional[int]:
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self , model_output , timestep , sample , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        '''simple docstring'''
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        # Linear multistep (Adams-Bashforth style) combination of the running values
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self , sample , *args , **kwargs ) -> torch.FloatTensor:
        '''simple docstring'''
        return sample
    def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ) -> Dict:
        '''simple docstring'''
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
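
# A hedged usage sketch of IPNDMScheduler (the `model` call and initial noisy
# `sample` are illustrative placeholders, not defined in this file):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)  # predict the noise residual
#       sample = scheduler.step(residual, t, sample).prev_sample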
def solution(n: int = 1000 ) -> int:
    '''simple docstring'''
    # Return the index of the first Fibonacci number with n decimal digits.
    f1 , f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1 , f2 = f2, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
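
# Quick sanity check: the first Fibonacci number with three digits is F(12) = 144,
# so solution(3) must return 12.
assert solution(3) == 12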
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
def hubble_parameter(
    hubble_constant: float ,
    radiation_density: float ,
    matter_density: float ,
    dark_energy: float ,
    redshift: float ,
) -> float:
    '''simple docstring'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("All input parameters must be positive" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("Relative densities cannot be greater than one" )
    else:
        # Whatever relative density is left over is attributed to curvature.
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        # Friedmann equation: E(z)^2 = O_r(1+z)^4 + O_m(1+z)^3 + O_k(1+z)^2 + O_Lambda
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        # H(z) = H0 * E(z)
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
SCREAMING_SNAKE_CASE__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """simple docstring"""
    def forward( self , input_ids , token_type_ids , attention_mask ) -> Dict:
        '''simple docstring'''
        return None
class FuncNonContiguousArgs:
    """simple docstring"""
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ) -> str:
        '''simple docstring'''
        return None
class OnnxExportTestCase ( unittest.TestCase ):
    """simple docstring"""
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def A__ ( self ) -> List[str]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCAmelCase , "tf" , 12 , **UpperCAmelCase )
@require_torch
@slow
def A__ ( self ) -> int:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCAmelCase , "pt" , 12 , **UpperCAmelCase )
@require_torch
@slow
def A__ ( self ) -> str:
'''simple docstring'''
from transformers import BertModel
lowercase_ = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(UpperCAmelCase ) )
vocab_file.flush()
lowercase_ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase_ = BertModel(BertConfig(vocab_size=len(UpperCAmelCase ) ) )
model.save_pretrained(UpperCAmelCase )
self._test_export(UpperCAmelCase , "pt" , 12 , UpperCAmelCase )
@require_tf
@slow
def A__ ( self ) -> Any:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ = self._test_export(UpperCAmelCase , "tf" , 12 , **UpperCAmelCase )
lowercase_ = quantize(Path(UpperCAmelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCAmelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def A__ ( self ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ = self._test_export(UpperCAmelCase , "pt" , 12 , **UpperCAmelCase )
lowercase_ = quantize(UpperCAmelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCAmelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase_ = Path(UpperCAmelCase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
return path
except Exception as e:
self.fail(UpperCAmelCase )
@require_torch
@require_tokenizers
@slow
def A__ ( self ) -> List[str]:
'''simple docstring'''
from transformers import BertModel
lowercase_ = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
lowercase_ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(UpperCAmelCase , UpperCAmelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def A__ ( self ) -> int:
'''simple docstring'''
from transformers import TFBertModel
lowercase_ = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
lowercase_ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(UpperCAmelCase , UpperCAmelCase , "tf" )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FeatureExtractionPipeline(UpperCAmelCase , UpperCAmelCase )
lowercase_ = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
lowercase_ , lowercase_ , lowercase_ , lowercase_ = infer_shapes(UpperCAmelCase , UpperCAmelCase )
# Assert all variables are present
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , UpperCAmelCase )
self.assertSequenceEqual(variable_names[3:] , UpperCAmelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = ["input_ids", "attention_mask", "token_type_ids"]
lowercase_ = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
lowercase_ , lowercase_ = ensure_valid_input(FuncContiguousArgs() , UpperCAmelCase , UpperCAmelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(UpperCAmelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(UpperCAmelCase ) , set(UpperCAmelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(UpperCAmelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase_ , lowercase_ = ensure_valid_input(FuncNonContiguousArgs() , UpperCAmelCase , UpperCAmelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(UpperCAmelCase ) , 1 )
self.assertEqual(len(UpperCAmelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def A__ ( self ) -> int:
'''simple docstring'''
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
import sys
def matrix_chain_order(array: list ):
    '''simple docstring'''
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
return matrix, sol
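
# The function above is the classic matrix-chain DP:
#   matrix[a][b] = min over c in [a, b) of
#                  matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# i.e. the cheapest split point c for multiplying A_a..A_b, with sol[a][b]
# recording the split that achieves it.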
def print_optiomal_solution(optimal_solution: list , i: int , j: int ):
    '''simple docstring'''
    if i == j:
        print("A" + str(i ) , end=" " )
    else:
        print("(" , end=" " )
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
print(")" , end=" " )
def main():
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , sol = matrix_chain_order(array )
    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optiomal_solution(sol , 1 , n - 1 )
if __name__ == "__main__":
main()
def perfect(number: int ) -> bool:
    '''simple docstring'''
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
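
# Sanity checks: the first perfect numbers are 6, 28, 496 and 8128.
assert perfect(6) and perfect(28) and perfect(496)
assert not perfect(27)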
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
SCREAMING_SNAKE_CASE__ = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
def equation(x: float ) -> float:
    '''simple docstring'''
    return 10 - x * x


def bisection(a: float , b: float ) -> float:
    '''simple docstring'''
    # Bolzano's theorem: a sign change over [a, b] is required to bracket a root.
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong space!" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
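
# The interval is halved on every pass, so the loop runs roughly
# log2((b - a) / 0.01) times, e.g. about 10 iterations for bisection(-2, 5).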
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
def bead_sort(sequence: list ) -> list:
    '''simple docstring'''
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                # Let the excess "beads" fall from the upper rod to the lower one.
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
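
# Each outer pass lets at least one "bead" settle into place, so the worst case
# is O(n^2) comparisons for a sequence of length n.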
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def load_vocab_file(vocab_file: str ):
    '''simple docstring'''
    with open(vocab_file , "r" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer ( PreTrainedTokenizer ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="<eos>" , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = load_vocab_file(UpperCAmelCase )
lowercase_ = dict(enumerate(self.all_tokens ) )
lowercase_ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase_ = unk_token
lowercase_ = cls_token
lowercase_ = pad_token
lowercase_ = mask_token
lowercase_ = eos_token
lowercase_ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return text.split()
def A__ ( self , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
return len(self._id_to_token )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.cls_token_id]
lowercase_ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase_ = [1] + ([0] * len(UpperCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(UpperCAmelCase ) + [1]
return mask
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = os.path.join(UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(UpperCAmelCase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> int:
'''simple docstring'''
return super()._add_tokens(UpperCAmelCase , special_tokens=UpperCAmelCase )
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello world! cécé herlolip"""
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str , pytorch_dump_folder_path: str , classification_head: bool ):
    '''simple docstring'''
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:" , config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
    model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase_ = roberta_sent_encoder.embed_tokens.weight
lowercase_ = roberta_sent_encoder.embed_positions.weight
lowercase_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowercase_ = roberta_sent_encoder.layer_norm.weight
lowercase_ = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase_ = model.roberta.encoder.layer[i]
lowercase_ = roberta_sent_encoder.layers[i]
lowercase_ = layer.attention
lowercase_ = roberta_layer.self_attn_layer_norm.weight
lowercase_ = roberta_layer.self_attn_layer_norm.bias
# self attention
lowercase_ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowercase_ = roberta_layer.self_attn.q_proj.weight
lowercase_ = roberta_layer.self_attn.q_proj.bias
lowercase_ = roberta_layer.self_attn.k_proj.weight
lowercase_ = roberta_layer.self_attn.k_proj.bias
lowercase_ = roberta_layer.self_attn.v_proj.weight
lowercase_ = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowercase_ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowercase_ = roberta_layer.self_attn.out_proj.weight
lowercase_ = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowercase_ = roberta_layer.final_layer_norm.weight
lowercase_ = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
lowercase_ = roberta.model.classification_heads["mnli"].dense.weight
lowercase_ = roberta.model.classification_heads["mnli"].dense.bias
lowercase_ = roberta.model.classification_heads["mnli"].out_proj.weight
lowercase_ = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
lowercase_ = roberta.model.encoder.lm_head.dense.weight
lowercase_ = roberta.model.encoder.lm_head.dense.bias
lowercase_ = roberta.model.encoder.lm_head.layer_norm.weight
lowercase_ = roberta.model.encoder.lm_head.layer_norm.bias
lowercase_ = roberta.model.encoder.lm_head.weight
lowercase_ = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowercase_ = roberta.encode(__lowerCamelCase ).unsqueeze(0 ) # batch of size 1
lowercase_ = model(__lowerCamelCase )[0]
if classification_head:
lowercase_ = roberta.model.classification_heads["mnli"](roberta.extract_features(__lowerCamelCase ) )
else:
lowercase_ = roberta.model(__lowerCamelCase )[0]
print(our_output.shape , their_output.shape )
lowercase_ = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
lowercase_ = torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(__lowerCamelCase ).mkdir(parents=__lowerCamelCase , exist_ok=__lowerCamelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> int:
'''simple docstring'''
if return_pvalue:
lowercase_ = pearsonr(UpperCAmelCase , UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCAmelCase , UpperCAmelCase )[0] )}
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest ( TestCase ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(UpperCAmelCase )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(UpperCAmelCase ):
self.assertDictEqual(UpperCAmelCase , example_records[i] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
lowercase_ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def A__ ( self ) -> Any: # checks what happens with missing columns
'''simple docstring'''
lowercase_ = [{"col_1": 1}, {"col_2": "x"}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def A__ ( self ) -> List[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
lowercase_ = [{"col_1": []}, {"col_1": [1, 2]}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = Dataset.from_list([] )
self.assertEqual(len(UpperCAmelCase ) , 0 )
self.assertListEqual(dset.column_names , [] )
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class MvpConfig ( PretrainedConfig ):
"""simple docstring"""
lowerCAmelCase__ = "mvp"
lowerCAmelCase__ = ["past_key_values"]
lowerCAmelCase__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCAmelCase=50267 , UpperCAmelCase=1024 , UpperCAmelCase=12 , UpperCAmelCase=4096 , UpperCAmelCase=16 , UpperCAmelCase=12 , UpperCAmelCase=4096 , UpperCAmelCase=16 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase="gelu" , UpperCAmelCase=1024 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=0.0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase=True , UpperCAmelCase=2 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=100 , UpperCAmelCase=800 , **UpperCAmelCase , ) -> Dict:
'''simple docstring'''
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
lowercase_ = d_model
lowercase_ = encoder_ffn_dim
lowercase_ = encoder_layers
lowercase_ = encoder_attention_heads
lowercase_ = decoder_ffn_dim
lowercase_ = decoder_layers
lowercase_ = decoder_attention_heads
lowercase_ = dropout
lowercase_ = attention_dropout
lowercase_ = activation_dropout
lowercase_ = activation_function
lowercase_ = init_std
lowercase_ = encoder_layerdrop
lowercase_ = decoder_layerdrop
lowercase_ = classifier_dropout
lowercase_ = use_cache
lowercase_ = encoder_layers
lowercase_ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ = use_prompt
lowercase_ = prompt_length
lowercase_ = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , forced_eos_token_id=UpperCAmelCase , **UpperCAmelCase , )
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , UpperCAmelCase ):
lowercase_ = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
"The config can simply be saved and uploaded again to be fixed." )
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
    def dummy_unet_condition( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
    def dummy_vqvae_and_unet( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
        vqvae = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
        unet = UNet2DModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase_ = DDPMScheduler()
lowercase_ = AudioDiffusionPipeline(vqvae=UpperCAmelCase , unet=self.dummy_unet , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 , return_dict=UpperCAmelCase )
lowercase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase_ = DDIMScheduler()
lowercase_ = self.dummy_vqvae_and_unet
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(raw_audio=UpperCAmelCase , generator=UpperCAmelCase , start_step=5 , steps=10 )
lowercase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase_ = self.dummy_unet_condition
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = torch.rand((1, 1, 10) )
lowercase_ = pipe(generator=UpperCAmelCase , encoding=UpperCAmelCase )
lowercase_ = output.images[0]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device
lowercase_ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 297
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=19 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=UpperCAmelCase , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , )
return config
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = EsmForProteinFolding(config=UpperCAmelCase ).float()
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
lowercase_ = model(UpperCAmelCase )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = False
lowerCAmelCase__ = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {} if is_torch_available() else {}
lowerCAmelCase__ = False
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = EsmFoldModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
@unittest.skip("Does not support attention outputs" )
def A__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip
def A__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("Esm does not support embedding resizing" )
def A__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("Esm does not support embedding resizing" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def A__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def A__ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def A__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support head pruning." )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def A__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("ESMFold only has one output format." )
def A__ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def A__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("ESMFold does not support input chunking." )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def A__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def A__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_torch
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
@slow
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float()
model.eval()
lowercase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase_ = model(UpperCAmelCase )["positions"]
lowercase_ = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , UpperCAmelCase , atol=1e-4 ) )
| 297
|
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
SCREAMING_SNAKE_CASE__ = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 297
| 1
|
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int ):
'''simple docstring'''
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = int(input("""Enter the first number: """).strip())
SCREAMING_SNAKE_CASE__ = int(input("""Enter the second number: """).strip())
print(f"""{add(first, second) = }""")
| 297
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[32, 64, 128] , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2"] , UpperCAmelCase=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = embed_dim
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
lowercase_ = mlp_ratio
lowercase_ = qkv_bias
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = drop_path_rate
lowercase_ = hidden_act
lowercase_ = use_absolute_embeddings
lowercase_ = patch_norm
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = is_training
lowercase_ = scope
lowercase_ = use_labels
lowercase_ = type_sequence_label_size
lowercase_ = encoder_stride
lowercase_ = out_features
lowercase_ = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = FocalNetModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
lowercase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = FocalNetForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 , has_text_modality=UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.hidden_states
lowercase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# FocalNet has a different seq_length
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = reshaped_hidden_states[0].shape
lowercase_ = (
reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = FocalNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
# verify the logits
lowercase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = FocalNetConfig
lowerCAmelCase__ = False
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
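# Hedged sketch (my addition) of the padding arithmetic exercised by the padded
# hidden-states test above: height and width are pushed past the next multiple
# of the patch size using dim + patch - (dim % patch).
def padded_dim(dim: int, patch: int) -> int:
    return dim + patch - (dim % patch)

assert padded_dim(33, 2) == 34  # rounds 33 up to the next multiple of 2
assert padded_dim(32, 2) == 34  # a full extra patch is added even when dim already divides evenly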
| 297
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
lowercase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase_ = DDPMScheduler()
lowercase_ = AudioDiffusionPipeline(vqvae=UpperCAmelCase , unet=self.dummy_unet , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 , return_dict=UpperCAmelCase )
lowercase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase_ = DDIMScheduler()
lowercase_ = self.dummy_vqvae_and_unet
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(raw_audio=UpperCAmelCase , generator=UpperCAmelCase , start_step=5 , steps=10 )
lowercase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase_ = self.dummy_unet_condition
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = torch.rand((1, 1, 10) )
lowercase_ = pipe(generator=UpperCAmelCase , encoding=UpperCAmelCase )
lowercase_ = output.images[0]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device
lowercase_ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
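# Hedged sketch (my addition) of the audio-length arithmetic asserted above: a
# spectrogram with W time frames decodes to (W - 1) * hop_length samples, since
# the hops count the gaps between successive frames.
def expected_audio_samples(spectrogram_width: int, hop_length: int) -> int:
    return (spectrogram_width - 1) * hop_length

assert expected_audio_samples(64, 512) == 32256  # hop_length=512 is an assumed example value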
| 297
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = GPTaTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = kwargs.pop("add_bos_token" , UpperCAmelCase )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[int]:
'''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
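# Hedged standalone sketch (my addition, independent of the tokenizer class) of
# the conversation flattening above: each turn is encoded, terminated with the
# EOS id, and the history is truncated from the left to the model maximum.
def build_conversation_ids(turn_ids, eos_id, model_max_length):
    input_ids = []
    for ids in turn_ids:
        input_ids.extend(ids + [eos_id])
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]
    return input_ids

assert build_conversation_ids([[1, 2], [3]], eos_id=0, model_max_length=4) == [2, 0, 3, 0]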
| 297
| 1
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=23 , UpperCAmelCase=11 , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = act_dim
lowercase_ = state_dim
lowercase_ = hidden_size
lowercase_ = max_length
lowercase_ = is_training
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowercase_ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DecisionTransformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCAmelCase__ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = DecisionTransformerModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DecisionTransformerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = 2 # number of steps of autoregressive prediction we will perform
lowercase_ = 10 # defined by the RL environment, may be normalized
lowercase_ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
lowercase_ = model.to(UpperCAmelCase )
lowercase_ = model.config
torch.manual_seed(0 )
lowercase_ = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ) # env.reset()
lowercase_ = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=UpperCAmelCase )
lowercase_ = torch.tensor(UpperCAmelCase , device=UpperCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowercase_ = state
lowercase_ = torch.zeros(1 , 0 , config.act_dim , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.zeros(1 , 0 , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.tensor(0 , device=UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCAmelCase ):
lowercase_ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase_ , lowercase_ , lowercase_ = model(
states=UpperCAmelCase , actions=UpperCAmelCase , rewards=UpperCAmelCase , returns_to_go=UpperCAmelCase , timesteps=UpperCAmelCase , attention_mask=UpperCAmelCase , return_dict=UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase_ = action_pred[0, -1]
lowercase_ = torch.cat([states, state] , dim=1 )
lowercase_ = returns_to_go[0, -1] - reward
lowercase_ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase_ = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
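# Hedged sketch (my addition) of the returns-to-go bookkeeping in the rollout
# above, with plain floats: after each environment step the observed reward is
# subtracted from the previous target return before the new value is appended.
def next_return_to_go(previous_return_to_go: float, reward: float) -> float:
    return previous_return_to_go - reward

_returns = [10.0]
for _reward in (1.0, 1.0):
    _returns.append(next_return_to_go(_returns[-1], _reward))
assert _returns == [10.0, 9.0, 8.0]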
| 297
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params , i , prefix ):
'''simple docstring'''
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
'''simple docstring'''
    k_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
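# Hedged numpy illustration (my addition) of the reshapes above: T5X stores the
# key/query/value projections as (d_model, num_heads, head_dim), and flattening
# the two trailing axes yields the (d_model, num_heads * head_dim) matrix that
# the PyTorch attention layers expect; the output projection flattens its two
# leading axes instead.
_k_demo = np.zeros((8, 2, 4) )  # (d_model, num_heads, head_dim)
assert _k_demo.reshape(_k_demo.shape[0] , _k_demo.shape[1] * _k_demo.shape[2] ).shape == (8, 8)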
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
'''simple docstring'''
    if split_mlp_wi:
        wi_0 = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_1 = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
'''simple docstring'''
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch( variables: dict , *, num_layers: int , is_encoder_only: bool , scalable_attention: bool = False ):
'''simple docstring'''
lowercase_ = traverse_util.flatten_dict(variables["target"] )
lowercase_ = {"/".join(__lowerCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase_ = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , __lowerCamelCase )
lowercase_ = collections.OrderedDict()
# Shared embeddings.
lowercase_ = old["token_embedder/embedding"]
# Encoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 1 (MLP).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_mlp_layer_norm" )
lowercase_ , lowercase_ = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , __lowerCamelCase )
lowercase_ = layer_norm
if split_mlp_wi:
lowercase_ = wi[0].T
lowercase_ = wi[1].T
else:
lowercase_ = wi.T
lowercase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , __lowerCamelCase , "encoder" ).T
lowercase_ = old["encoder/encoder_norm/scale"]
if not scalable_attention:
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , 0 , "encoder" ).T
lowercase_ = tax_relpos_bias_lookup(
__lowerCamelCase , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_self_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "self_attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 1 (Cross Attention).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_cross_attention_layer_norm" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "encoder_decoder_attention" )
lowercase_ = layer_norm
lowercase_ = k.T
lowercase_ = o.T
lowercase_ = q.T
lowercase_ = v.T
# Block i, layer 2 (MLP).
lowercase_ = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_mlp_layer_norm" )
lowercase_ , lowercase_ = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , __lowerCamelCase )
lowercase_ = layer_norm
if split_mlp_wi:
lowercase_ = wi[0].T
lowercase_ = wi[1].T
else:
lowercase_ = wi.T
lowercase_ = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ = tax_relpos_bias_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" ).T
lowercase_ = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase_ = old["decoder/logits_dense/kernel"].T
return new
def make_state_dict( converted_params: dict , is_encoder_only: bool ):
    '''simple docstring'''
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase_ = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase_ = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
lowercase_ = state_dict["shared.weight"]
return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    '''simple docstring'''
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only: bool = False , scalable_attention: bool = False , ):
    '''simple docstring'''
    config = MTaConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 297
| 1
|
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , num_vector_embeds = None , activation_fn = "geglu" , num_embeds_ada_norm = None , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def A__ ( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict = True , ):
        '''simple docstring'''
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
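# Hedged sketch (my addition) of the mixing rule in the forward pass above,
# with plain floats: the two transformers' residuals are blended by mix_ratio
# before the original input is added back.
def mix_residuals(residual_a, residual_b, input_state, mix_ratio=0.5):
    return residual_a * mix_ratio + residual_b * (1 - mix_ratio) + input_state

assert mix_residuals(2.0, 4.0, 1.0, mix_ratio=0.5) == 4.0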
| 297
|
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
'''simple docstring'''
    if not isinstance(__lowerCamelCase , int ):
        raise TypeError("Input value must be an 'int' type" )
    elif __lowerCamelCase < 0:
        raise ValueError("Input value must be a positive integer" )
return bin(__lowerCamelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
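# Hedged usage sketch (my addition): bin(n) renders the binary digits of n, so
# counting "1" characters is a simple popcount; Python 3.10+ offers the
# equivalent int.bit_count().
assert SCREAMING_SNAKE_CASE_(0b1011 ) == 3
assert SCREAMING_SNAKE_CASE_(0 ) == 0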
| 297
| 1
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE__ = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = [1, 1_0_8_8, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase = 3 , UpperCAmelCase = 1 , UpperCAmelCase = 1 , UpperCAmelCase = "relu" , **UpperCAmelCase , ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase_ = tf.keras.layers.ConvaD(
filters=UpperCAmelCase , kernel_size=UpperCAmelCase , strides=UpperCAmelCase , padding="VALID" , groups=UpperCAmelCase , use_bias=UpperCAmelCase , name="convolution" , )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
lowercase_ = ACTaFN[activation] if activation is not None else tf.identity
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.convolution(self.padding(UpperCAmelCase ) )
lowercase_ = self.normalization(UpperCAmelCase )
lowercase_ = self.activation(UpperCAmelCase )
return hidden_state
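# Illustrative sketch: the conv layer above pads explicitly with
# ZeroPadding2D(kernel_size // 2) and then convolves with padding="VALID".
# For stride 1 and an odd kernel this is equivalent to padding="SAME";
# the filter count and input shape below are demo assumptions.
import tensorflow as tf

x = tf.random.normal((1, 8, 8, 3))  # NHWC demo input
pad_then_valid = tf.keras.Sequential(
    [
        tf.keras.layers.ZeroPadding2D(padding=3 // 2),
        tf.keras.layers.Conv2D(4, kernel_size=3, strides=1, padding="VALID"),
    ]
)
same_conv = tf.keras.layers.Conv2D(4, kernel_size=3, strides=1, padding="SAME")
assert pad_then_valid(x).shape == same_conv(x).shape == (1, 8, 8, 4)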
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = config.num_channels
lowercase_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def A__ ( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = shape_list(UpperCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ = tf.transpose(UpperCAmelCase , perm=(0, 2, 3, 1) )
lowercase_ = self.embedder(UpperCAmelCase )
return hidden_state
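# Illustrative sketch: the embedder above receives NCHW pixel values but
# transposes to NHWC before convolving, because Conv2D on CPU only supports
# channels-last. The shapes are demo assumptions.
import tensorflow as tf

pixel_values = tf.random.normal((2, 3, 224, 224))              # (batch, C, H, W)
channels_last = tf.transpose(pixel_values, perm=(0, 2, 3, 1))  # (batch, H, W, C)
assert channels_last.shape == (2, 224, 224, 3)
back_to_channels_first = tf.transpose(channels_last, perm=(0, 3, 1, 2))
assert back_to_channels_first.shape == (2, 3, 224, 224)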
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase = 2 , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = tf.keras.layers.ConvaD(
filters=UpperCAmelCase , kernel_size=1 , strides=UpperCAmelCase , use_bias=UpperCAmelCase , name="convolution" )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(UpperCAmelCase ) , training=UpperCAmelCase )
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase , name="pooler" )
lowercase_ = [
tf.keras.layers.ConvaD(filters=UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = self.pooler(UpperCAmelCase )
for layer_module in self.attention:
lowercase_ = layer_module(UpperCAmelCase )
lowercase_ = hidden_state * pooled
return hidden_state
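# Illustrative sketch: the squeeze-and-excitation block above pools spatially to
# (batch, 1, 1, C), passes through a relu/sigmoid 1x1-conv bottleneck, and
# rescales the input channel-wise. Channel counts are demo assumptions.
import tensorflow as tf

hidden_state = tf.random.normal((2, 7, 7, 32))
pooled = tf.keras.layers.GlobalAveragePooling2D(keepdims=True)(hidden_state)
assert pooled.shape == (2, 1, 1, 32)
squeezed = tf.keras.layers.Conv2D(8, kernel_size=1, activation="relu")(pooled)
attention = tf.keras.layers.Conv2D(32, kernel_size=1, activation="sigmoid")(squeezed)
rescaled = hidden_state * attention   # sigmoid gate broadcasts over H and W
assert rescaled.shape == hidden_state.shape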
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , **UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(UpperCAmelCase , stride=UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ = [
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
UpperCAmelCase , stride=UpperCAmelCase , groups=UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=UpperCAmelCase , name="layer.2" ),
]
lowercase_ = ACTaFN[config.hidden_act]
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(UpperCAmelCase )
lowercase_ = self.shortcut(UpperCAmelCase )
hidden_state += residual
lowercase_ = self.activation(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , **UpperCAmelCase ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(UpperCAmelCase , stride=UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowercase_ = [
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
UpperCAmelCase , stride=UpperCAmelCase , groups=UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=UpperCAmelCase , name="layer.3" ),
]
lowercase_ = ACTaFN[config.hidden_act]
def A__ ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(UpperCAmelCase )
lowercase_ = self.shortcut(UpperCAmelCase )
hidden_state += residual
lowercase_ = self.activation(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 2 , UpperCAmelCase = 2 , **UpperCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowercase_ = [
# downsampling is done in the first layer with stride of 2
layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase , name="layers.0" ),
*[layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
for layer_module in self.layers:
lowercase_ = layer_module(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , depth=UpperCAmelCase , name=F'stages.{i+1}' ) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
lowercase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
lowercase_ = stage_module(UpperCAmelCase )
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase , hidden_states=UpperCAmelCase )
@keras_serializable
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
lowerCAmelCase__ = RegNetConfig
def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = config
lowercase_ = TFRegNetEmbeddings(UpperCAmelCase , name="embedder" )
lowercase_ = TFRegNetEncoder(UpperCAmelCase , name="encoder" )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase , name="pooler" )
@unpack_inputs
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.embedder(UpperCAmelCase , training=UpperCAmelCase )
lowercase_ = self.encoder(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase )
lowercase_ = encoder_outputs[0]
lowercase_ = self.pooler(UpperCAmelCase )
        # Change to NCHW output format to have uniformity in the modules
lowercase_ = tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) )
lowercase_ = tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ = tuple([tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase , pooler_output=UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = RegNetConfig
lowerCAmelCase__ = "regnet"
lowerCAmelCase__ = "pixel_values"
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
SCREAMING_SNAKE_CASE__ = R"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE__ = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
        [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , snake_case_ , )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
lowercase_ = TFRegNetMainLayer(UpperCAmelCase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
pixel_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , snake_case_ , )
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
lowercase_ = config.num_labels
lowercase_ = TFRegNetMainLayer(UpperCAmelCase , name="regnet" )
# classification head
lowercase_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A__ ( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase )
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier[0](UpperCAmelCase )
lowercase_ = self.classifier[1](UpperCAmelCase )
lowercase_ = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase , logits=UpperCAmelCase )
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states )
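# Illustrative sketch: end-to-end use of the classification model defined above
# through its public transformers name, TFRegNetForImageClassification, assuming
# the facebook/regnet-y-040 checkpoint referenced in the docstring constants.
import requests
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits
print(model.config.id2label[int(logits.numpy().argmax(-1)[0])])  # e.g. "tabby, tabby cat"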
| 297
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 42
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self , UpperCAmelCase = 16 , UpperCAmelCase = 88 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 32 , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = "geglu" , UpperCAmelCase = True , UpperCAmelCase = True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowercase_ = num_attention_heads
lowercase_ = attention_head_dim
lowercase_ = num_attention_heads * attention_head_dim
lowercase_ = in_channels
lowercase_ = torch.nn.GroupNorm(num_groups=UpperCAmelCase , num_channels=UpperCAmelCase , eps=1e-6 , affine=UpperCAmelCase )
lowercase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
# 3. Define transformers blocks
lowercase_ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , dropout=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , activation_fn=UpperCAmelCase , attention_bias=UpperCAmelCase , double_self_attention=UpperCAmelCase , norm_elementwise_affine=UpperCAmelCase , )
for d in range(UpperCAmelCase )
] )
lowercase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=1 , UpperCAmelCase=None , UpperCAmelCase = True , ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ = hidden_states.shape
lowercase_ = batch_frames // num_frames
lowercase_ = hidden_states
lowercase_ = hidden_states[None, :].reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowercase_ = self.norm(UpperCAmelCase )
lowercase_ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCAmelCase , UpperCAmelCase )
lowercase_ = self.proj_in(UpperCAmelCase )
# 2. Blocks
for block in self.transformer_blocks:
lowercase_ = block(
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , class_labels=UpperCAmelCase , )
# 3. Output
lowercase_ = self.proj_out(UpperCAmelCase )
lowercase_ = (
hidden_states[None, None, :]
.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowercase_ = hidden_states.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCAmelCase )
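# Illustrative sketch: the frame bookkeeping performed by the temporal block
# above. A (batch * frames, C, H, W) tensor is unfolded so that attention runs
# across the frame axis at every spatial location. All sizes are demo assumptions.
import torch

batch_size, num_frames, channel, height, width = 2, 4, 8, 3, 3
hidden = torch.randn(batch_size * num_frames, channel, height, width)
unfolded = hidden[None, :].reshape(batch_size, num_frames, channel, height, width)
unfolded = unfolded.permute(0, 2, 1, 3, 4)  # (batch, C, frames, H, W)
tokens = unfolded.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
# each "sequence" is the num_frames positions at a single (batch, h, w) location
assert tokens.shape == (batch_size * height * width, num_frames, channel)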
| 297
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] ):
'''simple docstring'''
lowercase_ = SwinvaConfig()
lowercase_ = swinva_name.split("_" )
lowercase_ = name_split[1]
if "to" in name_split[3]:
lowercase_ = int(name_split[3][-3:] )
else:
lowercase_ = int(name_split[3] )
if "to" in name_split[2]:
lowercase_ = int(name_split[2][-2:] )
else:
lowercase_ = int(name_split[2][6:] )
if model_size == "tiny":
lowercase_ = 96
lowercase_ = (2, 2, 6, 2)
lowercase_ = (3, 6, 12, 24)
elif model_size == "small":
lowercase_ = 96
lowercase_ = (2, 2, 18, 2)
lowercase_ = (3, 6, 12, 24)
elif model_size == "base":
lowercase_ = 128
lowercase_ = (2, 2, 18, 2)
lowercase_ = (4, 8, 16, 32)
else:
lowercase_ = 192
lowercase_ = (2, 2, 18, 2)
lowercase_ = (6, 12, 24, 48)
if "to" in swinva_name:
lowercase_ = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
lowercase_ = 2_1841
lowercase_ = "huggingface/label-files"
lowercase_ = "imagenet-22k-id2label.json"
lowercase_ = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) , "r" ) )
lowercase_ = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
else:
lowercase_ = 1000
lowercase_ = "huggingface/label-files"
lowercase_ = "imagenet-1k-id2label.json"
lowercase_ = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) , "r" ) )
lowercase_ = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
lowercase_ = img_size
lowercase_ = num_classes
lowercase_ = embed_dim
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
return config
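# Illustrative sketch: how the config builder above pulls the size fields out of
# a timm checkpoint name. Both sample names are assumptions chosen to exercise
# the two branches.
for name in ("swinv2_tiny_patch4_window8_256", "swinv2_base_window12to16_192to256_22kft1k"):
    parts = name.split("_")
    model_size = parts[1]
    img_size = int(parts[3][-3:]) if "to" in parts[3] else int(parts[4])
    window_size = int(parts[2][-2:]) if "to" in parts[2] else int(parts[3][6:])
    print(name, "->", model_size, img_size, window_size)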
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
if "patch_embed.proj" in name:
lowercase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowercase_ = "encoder." + name
if "attn.proj" in name:
lowercase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowercase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase_ = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
lowercase_ = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
lowercase_ = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
lowercase_ = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
lowercase_ = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
lowercase_ = "layernorm.weight"
if name == "norm.bias":
lowercase_ = "layernorm.bias"
if "head" in name:
lowercase_ = name.replace("head" , "classifier" )
else:
lowercase_ = "swinv2." + name
return name
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: List[Any] , __lowerCamelCase: Tuple ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        lowercase_ = orig_state_dict.pop(key )
if "mask" in key:
continue
elif "qkv" in key:
lowercase_ = key.split("." )
lowercase_ = int(key_split[1] )
lowercase_ = int(key_split[3] )
lowercase_ = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase_ = val[:dim, :]
lowercase_ = val[dim : dim * 2, :]
lowercase_ = val[-dim:, :]
else:
lowercase_ = val[:dim]
                lowercase_ = val[dim : dim * 2]
lowercase_ = val[-dim:]
else:
lowercase_ = val
return orig_state_dict
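# Illustrative sketch: timm stores attention projections as one fused qkv matrix;
# the converter above slices it into equal thirds for query/key/value. The
# dimension is a demo assumption.
import torch

all_head_size = 96
qkv_weight = torch.randn(3 * all_head_size, all_head_size)
query = qkv_weight[:all_head_size, :]
key = qkv_weight[all_head_size : all_head_size * 2, :]
value = qkv_weight[-all_head_size:, :]
assert query.shape == key.shape == value.shape == (all_head_size, all_head_size)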
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] ):
'''simple docstring'''
lowercase_ = timm.create_model(__lowerCamelCase , pretrained=__lowerCamelCase )
timm_model.eval()
lowercase_ = get_swinva_config(__lowerCamelCase )
lowercase_ = SwinvaForImageClassification(__lowerCamelCase )
model.eval()
lowercase_ = convert_state_dict(timm_model.state_dict() , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
lowercase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase_ = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
lowercase_ = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
lowercase_ = image_processor(images=__lowerCamelCase , return_tensors="pt" )
lowercase_ = timm_model(inputs["pixel_values"] )
lowercase_ = model(**__lowerCamelCase ).logits
assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCamelCase )
model.push_to_hub(
repo_path_or_name=Path(__lowerCamelCase , __lowerCamelCase ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 297
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def A__ ( self , UpperCAmelCase ) -> float:
'''simple docstring'''
return 0.0
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: np.ndarray , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
lowercase_ = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: FilterType , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = 512
lowercase_ = [1] + [0] * (size - 1)
    lowercase_ = [filter_type.process(item ) for item in inputs]
lowercase_ = [0] * (samplerate - size) # zero-padding
outputs += filler
lowercase_ = np.abs(np.fft.fft(__lowerCamelCase ) )
lowercase_ = 20 * np.logaa(__lowerCamelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
lowercase_ = get_bounds(__lowerCamelCase , __lowerCamelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(__lowerCamelCase )
plt.show()
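# Illustrative sketch: the plot helper above probes a filter with a unit impulse
# and FFTs the response. With a pass-through filter (a hypothetical stand-in for
# the FilterType protocol) the gain is a flat 0 dB at every frequency.
import numpy as np

class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample

samplerate, size = 48_000, 512
impulse = [1] + [0] * (size - 1)
response = [IdentityFilter().process(s) for s in impulse] + [0] * (samplerate - size)
gain_db = 20 * np.log10(np.abs(np.fft.fft(response)))
assert np.allclose(gain_db, 0.0)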
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: FilterType , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = 512
lowercase_ = [1] + [0] * (size - 1)
    lowercase_ = [filter_type.process(item ) for item in inputs]
lowercase_ = [0] * (samplerate - size) # zero-padding
outputs += filler
lowercase_ = np.angle(np.fft.fft(__lowerCamelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(__lowerCamelCase , -2 * pi ) )
plt.show()
| 297
| 1
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
lowercase_ = SwinConfig()
lowercase_ = swin_name.split("_" )
lowercase_ = name_split[1]
lowercase_ = int(name_split[4] )
lowercase_ = int(name_split[3][-1] )
if model_size == "tiny":
lowercase_ = 96
lowercase_ = (2, 2, 6, 2)
lowercase_ = (3, 6, 12, 24)
elif model_size == "small":
lowercase_ = 96
lowercase_ = (2, 2, 18, 2)
lowercase_ = (3, 6, 12, 24)
elif model_size == "base":
lowercase_ = 128
lowercase_ = (2, 2, 18, 2)
lowercase_ = (4, 8, 16, 32)
else:
lowercase_ = 192
lowercase_ = (2, 2, 18, 2)
lowercase_ = (6, 12, 24, 48)
if "in22k" in swin_name:
lowercase_ = 2_1841
else:
lowercase_ = 1000
lowercase_ = "huggingface/label-files"
lowercase_ = "imagenet-1k-id2label.json"
lowercase_ = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) , "r" ) )
lowercase_ = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
lowercase_ = img_size
lowercase_ = num_classes
lowercase_ = embed_dim
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
return config
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any ):
'''simple docstring'''
if "patch_embed.proj" in name:
lowercase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowercase_ = "encoder." + name
if "attn.proj" in name:
lowercase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowercase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase_ = name.replace("mlp.fc2" , "output.dense" )
if name == "norm.weight":
lowercase_ = "layernorm.weight"
if name == "norm.bias":
lowercase_ = "layernorm.bias"
if "head" in name:
lowercase_ = name.replace("head" , "classifier" )
else:
lowercase_ = "swin." + name
return name
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: Any ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        lowercase_ = orig_state_dict.pop(key )
if "mask" in key:
continue
elif "qkv" in key:
lowercase_ = key.split("." )
lowercase_ = int(key_split[1] )
lowercase_ = int(key_split[3] )
lowercase_ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase_ = val[:dim, :]
                lowercase_ = val[dim : dim * 2, :]
                lowercase_ = val[-dim:, :]
            else:
                lowercase_ = val[:dim]
                lowercase_ = val[dim : dim * 2]
                lowercase_ = val[-dim:]
else:
lowercase_ = val
return orig_state_dict
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict ):
'''simple docstring'''
lowercase_ = timm.create_model(__lowerCamelCase , pretrained=__lowerCamelCase )
timm_model.eval()
lowercase_ = get_swin_config(__lowerCamelCase )
lowercase_ = SwinForImageClassification(__lowerCamelCase )
model.eval()
lowercase_ = convert_state_dict(timm_model.state_dict() , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
lowercase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase_ = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) )
lowercase_ = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
lowercase_ = image_processor(images=__lowerCamelCase , return_tensors="pt" )
lowercase_ = timm_model(inputs["pixel_values"] )
lowercase_ = model(**__lowerCamelCase ).logits
assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 297
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = MvpTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = "post_processor"
lowercase_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__() )
        # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["sep"] )
if "cls" in state:
lowercase_ = tuple(state["cls"] )
lowercase_ = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(UpperCAmelCase , state.pop("type" ) )
lowercase_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def A__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
    @A__.setter
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
lowercase_ = value
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
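# Illustrative sketch: the BART-style special-token layout the two methods above
# implement. Token ids are demo assumptions.
bos, eos = 0, 2
seq_a, seq_b = [10, 11], [20]
single = [bos] + seq_a + [eos]            # <s> A </s>
pair = single + [eos] + seq_b + [eos]     # <s> A </s> </s> B </s>
assert single == [0, 10, 11, 2]
assert pair == [0, 10, 11, 2, 2, 20, 2]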
| 297
| 1
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
lowercase_ = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , __lowerCamelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
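# Illustrative sketch: the core pattern the tests above exercise. patch_submodule
# swaps an attribute reachable through a dotted path for the duration of a
# context, then restores the original. Patching shutil's `os` import is a demo
# choice; any module that imports os would do.
import os
import shutil
from datasets.utils.patching import patch_submodule

mock = "__demo_mock_join__"
with patch_submodule(shutil, "os.path.join", mock):
    assert shutil.os.path.join is mock   # patched through a _PatchedModuleObj proxy
assert shutil.os.path.join is os.path.join   # restored on exit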
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
assert _test_patching.open is open
lowercase_ = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , __lowerCamelCase ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , __lowerCamelCase ):
pass
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , "len" , None ) is None
with patch_submodule(_test_patching , "len" , __lowerCamelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = "__test_patch_submodule_start_and_stop_mock__"
lowercase_ = patch_submodule(_test_patching , "open" , __lowerCamelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
lowercase_ = "__test_patch_submodule_successive_join__"
lowercase_ = "__test_patch_submodule_successive_dirname__"
lowercase_ = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , __lowerCamelCase ):
with patch_submodule(_test_patching , "os.rename" , __lowerCamelCase ):
with patch_submodule(_test_patching , "os.path.dirname" , __lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , __lowerCamelCase ):
with patch_submodule(_test_patching , "os.path.join" , __lowerCamelCase ):
with patch_submodule(_test_patching , "os.path.dirname" , __lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , __lowerCamelCase ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , __lowerCamelCase ):
pass
| 297
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 32
lowercase_ = embedder_hidden_size
# image encoding components
lowercase_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowercase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase , projection_dim=UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowercase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase )
lowercase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase , layers_per_block=1 , upcast_attention=UpperCAmelCase , use_linear_projection=UpperCAmelCase , )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL()
lowercase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=True ) -> Tuple:
'''simple docstring'''
if str(UpperCAmelCase ).startswith("mps" ):
lowercase_ = torch.manual_seed(UpperCAmelCase )
else:
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if pil_image:
lowercase_ = input_image * 0.5 + 0.5
lowercase_ = input_image.clamp(0 , 1 )
lowercase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ = DiffusionPipeline.numpy_to_pil(UpperCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableUnCLIPImgaImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
inputs.update({"image_embeds": None} )
lowercase_ = sd_pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = pipe(
UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
lowercase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 297
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] ):
'''simple docstring'''
lowercase_ = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
lowercase_ = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
lowercase_ = token.rstrip("\n" )
lowercase_ = index
return vocab
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase=200 ) -> int:
'''simple docstring'''
lowercase_ = vocab
lowercase_ = unk_token
lowercase_ = max_input_chars_per_word
def A__ ( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = list(UpperCAmelCase )
if len(UpperCAmelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase_ = 0
lowercase_ = []
while start < len(UpperCAmelCase ):
lowercase_ = len(UpperCAmelCase )
lowercase_ = None
while start < end:
lowercase_ = "".join(chars[start:end] )
if substr in self.vocab:
lowercase_ = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(UpperCAmelCase )
lowercase_ = end
return sub_tokens
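# Illustrative sketch: the greedy longest-prefix-match loop implemented above,
# run standalone on a toy vocabulary (both are demo assumptions).
def longest_match(word, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while start < end and word[start:end] not in vocab:
            end -= 1
        if start == end:          # no prefix matched: emit <unk>, advance one char
            tokens.append(unk)
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens

assert longest_match("unhappiness", {"unhappi", "ness"}) == ["unhappi", "ness"]
assert longest_match("xyz", {"ab"}) == ["<unk>", "<unk>", "<unk>"]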
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = False
def __init__( self , UpperCAmelCase , UpperCAmelCase="<d>" , UpperCAmelCase="</d>" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<unk>" , UpperCAmelCase="</n>" , UpperCAmelCase="</_>" , UpperCAmelCase="left" , **UpperCAmelCase , ) -> str:
'''simple docstring'''
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=UpperCAmelCase , eod_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , pad_token=UpperCAmelCase , unk_token=UpperCAmelCase , line_token=UpperCAmelCase , space_token=UpperCAmelCase , padding_side=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = bod_token
lowercase_ = eod_token
lowercase_ = load_vocab(UpperCAmelCase )
lowercase_ = self.encoder[space_token]
lowercase_ = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        lowercase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
lowercase_ = {v: k for k, v in self.encoder.items()}
lowercase_ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def A__ ( self ) -> str:
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return self.encoder["\n"]
@property
def A__ ( self ) -> int:
'''simple docstring'''
return len(self.encoder )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = []
for x in jieba.cut(UpperCAmelCase , cut_all=UpperCAmelCase ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
return output_tokens
def A__ ( self , UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = [i for i in token_ids if i >= 0]
lowercase_ = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return token in self.encoder
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return "".join(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if os.path.isdir(UpperCAmelCase ):
lowercase_ = os.path.join(
UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowercase_ = (filename_prefix + "-" if filename_prefix else "") + save_directory
lowercase_ = 0
if " " in self.encoder:
lowercase_ = self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
lowercase_ = self.encoder["\n"]
del self.encoder["\n"]
        lowercase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
" Please check that the vocabulary is not corrupted!" )
lowercase_ = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase ))
return [1] + ([0] * len(UpperCAmelCase ))
| 297
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution ):
    """simple docstring"""
    def __init__( self , base_distribution: Distribution , loc=None , scale=None , event_dim=0 ):
        '''simple docstring'''
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean( self ):
        '''simple docstring'''
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self ):
        '''simple docstring'''
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self ):
        '''simple docstring'''
        return self.variance.sqrt()
class ParameterProjection(nn.Module ):
    """simple docstring"""
    def __init__( self , in_features: int , args_dim: Dict[str, int] , domain_map: Callable[..., Tuple[torch.Tensor]] , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self , x: torch.Tensor ) -> Tuple[torch.Tensor]:
        '''simple docstring'''
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer(nn.Module ):
    """simple docstring"""
    def __init__( self , function ):
        '''simple docstring'''
        super().__init__()
        self.function = function
    def forward( self , x , *args ):
        '''simple docstring'''
        return self.function(x , *args )
class DistributionOutput:
    """simple docstring"""
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__( self , dim: int = 1 ) -> None:
        '''simple docstring'''
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ) -> Distribution:
        '''simple docstring'''
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None , ) -> Distribution:
        '''simple docstring'''
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self ) -> Tuple:
        '''simple docstring'''
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self ) -> int:
        '''simple docstring'''
        return len(self.event_shape )
    @property
    def value_in_support( self ) -> float:
        '''simple docstring'''
        return 0.0
    def get_parameter_projection( self , in_features: int ) -> nn.Module:
        '''simple docstring'''
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self , *args: torch.Tensor ):
        '''simple docstring'''
        raise NotImplementedError()
    @staticmethod
    def squareplus( x: torch.Tensor ) -> torch.Tensor:
        '''simple docstring'''
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput(DistributionOutput ):
    """simple docstring"""
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
    @classmethod
    def domain_map( cls , df: torch.Tensor , loc: torch.Tensor , scale: torch.Tensor ):
        '''simple docstring'''
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput(DistributionOutput ):
    """simple docstring"""
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
    @classmethod
    def domain_map( cls , loc: torch.Tensor , scale: torch.Tensor ):
        '''simple docstring'''
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput(DistributionOutput ):
    """simple docstring"""
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
    @classmethod
    def domain_map( cls , total_count: torch.Tensor , logits: torch.Tensor ):
        '''simple docstring'''
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution( self , distr_args ) -> Distribution:
        '''simple docstring'''
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution( self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None ) -> Distribution:
        '''simple docstring'''
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
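# A minimal usage sketch, assuming the class names here (NormalOutput, AffineTransformed,
# ParameterProjection) match transformers' time-series utilities: project features to
# (loc, scale) and build a scaled scalar Normal head.
if __name__ == "__main__":
    output = NormalOutput(dim=1)
    proj = output.get_parameter_projection(in_features=8)
    params = proj(torch.randn(4, 8))                    # tuple of (loc, scale), each shape (4,)
    distr = output.distribution(params, scale=torch.ones(4))
    print(distr.sample().shape)                         # torch.Size([4])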
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str ) -> int:
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int ) -> str:
    '''simple docstring'''
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt" ) -> int:
    '''simple docstring'''
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shortened = generate_roman_numerals(num )
        savings += len(original ) - len(shortened )
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
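# A quick sanity check of the two helpers (a sketch, not part of the Project Euler solution):
# a non-minimal numeral should round-trip to its shortest form.
assert parse_roman_numerals("XIIII") == 14   # non-minimal spelling of 14
assert generate_roman_numerals(14) == "XIV"  # minimal spelling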
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset ):
    """simple docstring"""
    def __init__( self , data ):
        '''simple docstring'''
        self.data = data
    def __iter__( self ):
        '''simple docstring'''
        for element in self.data:
            yield element
def create_accelerator(even_batches=True ):
    '''simple docstring'''
    accelerator = Accelerator(even_batches=even_batches )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator: Accelerator , dataset_size: int , batch_size: int , iterable: bool = False ):
    '''simple docstring'''
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def verify_dataloader_batch_sizes(accelerator: Accelerator , dataset_size: int , batch_size: int , process_0_expected_batch_sizes: List[int] , process_1_expected_batch_sizes: List[int] , ):
    '''simple docstring'''
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    '''simple docstring'''
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def test_can_disable_even_batches():
    '''simple docstring'''
    accelerator = create_accelerator(even_batches=False )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def test_can_join_uneven_inputs():
    '''simple docstring'''
    accelerator = create_accelerator(even_batches=False )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(dl ):
            output = ddp_model(batch[0].float() )
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx )
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator ):
    '''simple docstring'''
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
    assert issubclass(w[-1].category , UserWarning )
    assert "only supported for multi-GPU" in str(w[-1].message )
def test_join_can_override_even_batches():
    '''simple docstring'''
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    train_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    valid_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    '''simple docstring'''
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    batch_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore" )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    '''simple docstring'''
    accelerator = create_accelerator()
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=False ):
            pass
    assert issubclass(w[-1].category , UserWarning )
    assert "only supported for map-style datasets" in str(w[-1].message )
def main():
    '''simple docstring'''
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled" )
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs" )
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs" )
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types" )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning" )
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator )
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
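# Illustrative arithmetic behind the expected batch sizes above, assuming a
# round-robin split of the 7-sample dataset across the 2 processes:
shard_0, shard_1 = list(range(0, 7, 2)), list(range(1, 7, 2))           # 4 and 3 samples
sizes = lambda shard: [len(shard[i : i + 2]) for i in range(0, len(shard), 2)]
assert sizes(shard_0) == [2, 2] and sizes(shard_1) == [2, 1]            # even_batches=False
assert sizes(shard_1 + shard_1[:1]) == [2, 2]                           # even_batches=True pads by reuse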
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = YolosImageProcessor if is_vision_available() else None
def A__ ( self ) -> Tuple:
'''simple docstring'''
        self.image_processor_tester = YolosImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
def A__ ( self ) -> Tuple:
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , False )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def A__ ( self ) -> Dict:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def A__ ( self ) -> str:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
        image_processing_1 = self.image_processing_class(**self.image_processor_dict )
        image_processing_2 = self.image_processing_class(do_resize=False , do_normalize=False , do_rescale=False )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs , return_tensors="pt" )
        encoded_images = image_processing_2(image_inputs , return_tensors="pt" )
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1e-4 ) )
@slow
def A__ ( self ) -> List[Any]:
'''simple docstring'''
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_shape )
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
@slow
def A__ ( self ) -> int:
'''simple docstring'''
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_shape )
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
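# A standalone restatement of the shortest-edge resize rule behind the expected
# shapes above (a sketch; it ignores the "longest_edge" cap, which these tests never hit):
def shortest_edge_resize(w, h, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge   # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge
assert shortest_edge_resize(30, 60) == (36, 18)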
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
    def dummy_cond_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
    def dummy_vae( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder( self ):
'''simple docstring'''
torch.manual_seed(0 )
        config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(config )
@property
    def dummy_extractor( self ):
'''simple docstring'''
def extract(*UpperCAmelCase , **UpperCAmelCase ):
            class Out:
                """simple docstring"""
                def __init__( self ):
                    '''simple docstring'''
                    self.pixel_values = torch.ones([0] )
                def to( self , device ):
                    '''simple docstring'''
                    self.pixel_values.to(device )
                    return self
return Out()
return extract
def A__ ( self ) -> str:
'''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device )
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=init_image , )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=init_image , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> str:
'''simple docstring'''
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device )
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        image = alt_pipe(
            [prompt] , generator=generator , num_inference_steps=2 , output_type="np" , image=init_image , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504) )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type="np" , )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[str]:
'''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type="np" , )
        image = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T] ):
"""simple docstring"""
    def __init__( self , directed: bool = True ) -> None:
        '''simple docstring'''
        self.adj_list = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex: T , destination_vertex: T ) -> GraphAdjacencyList[T]:
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
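# A short usage sketch; GraphAdjacencyList is the name taken from the return
# annotation above, and add_edge returns self, so calls chain:
if __name__ == "__main__":
    graph = GraphAdjacencyList[int]()        # undirected by default
    graph.add_edge(0, 1).add_edge(1, 2)
    print(graph)                             # {0: [1], 1: [0, 2], 2: [1]}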
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config( self ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model( self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ):
        '''simple docstring'''
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False
    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
def A__ ( self ) -> Dict:
'''simple docstring'''
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )
def A__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def A__ ( self ) -> Any:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 )  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path ):
    '''simple docstring'''
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        "\\n        header1,header2\n        1,2\n        10,20\n        " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file(tmp_path ):
    '''simple docstring'''
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        "\\n        header1,header2\n        1,2\n        10,20,\n        " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image(tmp_path , image_file ):
    '''simple docstring'''
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        F'\\n        image\n        {image_file}\n        ' )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label(tmp_path ):
    '''simple docstring'''
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        "\\n        label\n        good\n        bad\n        good\n        " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list(tmp_path ):
    '''simple docstring'''
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        "\\n        int_list\n        1 2 3\n        4 5 6\n        7 8 9\n        " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file , malformed_csv_file , caplog ):
    '''simple docstring'''
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match="Error tokenizing data" ):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image ):
    '''simple docstring'''
    with open(csv_file_with_image , encoding="utf-8" ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("image" ).type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label ):
    '''simple docstring'''
    with open(csv_file_with_label , encoding="utf-8" ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list ):
    '''simple docstring'''
    csv = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x : [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
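# A minimal illustration of the int_list converter above (plain pandas read_csv
# converters semantics: it is applied to each raw string cell of the column):
assert (lambda x: [int(i) for i in x.split()])("1 2 3") == [1, 2, 3]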
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , trained_betas = None ):
        '''simple docstring'''
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self , num_inference_steps: int , device = None ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , return_dict: bool = True , ) -> Union[SchedulerOutput, Tuple]:
        '''simple docstring'''
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self , sample: torch.FloatTensor , *args , **kwargs ) -> torch.FloatTensor:
        '''simple docstring'''
        return sample
    def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ):
        '''simple docstring'''
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
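# A hedged usage sketch; IPNDMScheduler is the name restored above from the F-PNDM
# comment (diffusers' scheduler of that name), with set_timesteps/step as defined here:
if __name__ == "__main__":
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 32, 32)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for a real denoiser call
        sample = scheduler.step(model_output, t, sample).prev_sample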
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int ):
    '''simple docstring'''
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def seed(canvas: list[list[bool]] ):
    '''simple docstring'''
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list[list[bool]] ):
'''simple docstring'''
lowercase_ = np.array(__lowerCamelCase )
lowercase_ = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__lowerCamelCase ):
for c, pt in enumerate(__lowerCamelCase ):
lowercase_ = __judge_point(
__lowerCamelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
lowercase_ = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
lowercase_ = current_canvas.tolist()
return return_canvas
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: bool , __lowerCamelCase: list[list[bool]] ):
'''simple docstring'''
lowercase_ = 0
lowercase_ = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
lowercase_ = pt
if pt:
if alive < 2:
lowercase_ = False
elif alive == 2 or alive == 3:
lowercase_ = True
elif alive > 3:
lowercase_ = False
else:
if alive == 3:
lowercase_ = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
SCREAMING_SNAKE_CASE__ = int(sys.argv[1])
# main working structure of this module.
SCREAMING_SNAKE_CASE__ = create_canvas(canvas_size)
seed(c)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = plt.subplots()
fig.show()
SCREAMING_SNAKE_CASE__ = ListedColormap(["""w""", """k"""])
try:
while True:
SCREAMING_SNAKE_CASE__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
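# Hypothetical, self-contained restatement of the update rule applied by the
# judge function above (Conway's rules): a live cell survives with 2 or 3
# live neighbours, a dead cell becomes live with exactly 3.
def next_state(alive_now: bool, live_neighbours: int) -> bool:
    if alive_now:
        return live_neighbours in (2, 3)
    return live_neighbours == 3
assert next_state(True, 1) is False   # under-population
assert next_state(True, 2) is True    # survival
assert next_state(True, 4) is False   # over-population
assert next_state(False, 3) is True   # reproduction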
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , ):
'''simple docstring'''
lowercase_ = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
lowercase_ = 1 - (matter_density + radiation_density + dark_energy)
lowercase_ = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
lowercase_ = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
SCREAMING_SNAKE_CASE__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
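# Hypothetical, self-contained restatement of the Friedmann relation used
# above, H(z) = H0 * sqrt(Or(1+z)^4 + Om(1+z)^3 + Ok(1+z)^2 + OL); the
# function name here is made up. At z = 0 the densities (including the
# derived curvature) sum to exactly one, so H(0) recovers H0.
import math
def hubble(h0, z, o_r, o_m, o_l):
    o_k = 1 - (o_m + o_r + o_l)  # curvature density from the closure relation
    e2 = o_r * (1 + z) ** 4 + o_m * (1 + z) ** 3 + o_k * (1 + z) ** 2 + o_l
    return h0 * e2 ** 0.5
assert math.isclose(hubble(68.3, 0, 1e-4, 0.3, 0.7), 68.3)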
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
SCREAMING_SNAKE_CASE__ = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
SCREAMING_SNAKE_CASE__ = (
subprocess.check_output(f"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
)
SCREAMING_SNAKE_CASE__ = """|""".join(sys.argv[1:])
SCREAMING_SNAKE_CASE__ = re.compile(Rf"""^({joined_dirs}).*?\.py$""")
SCREAMING_SNAKE_CASE__ = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
import sys
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
lowercase_ = len(__lowerCamelCase )
lowercase_ = [[0 for x in range(__lowerCamelCase )] for x in range(__lowerCamelCase )]
lowercase_ = [[0 for x in range(__lowerCamelCase )] for x in range(__lowerCamelCase )]
for chain_length in range(2 , __lowerCamelCase ):
for a in range(1 , n - chain_length + 1 ):
lowercase_ = a + chain_length - 1
lowercase_ = sys.maxsize
for c in range(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowercase_ = cost
lowercase_ = c
return matrix, sol
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict ):
'''simple docstring'''
if i == j:
print("A" + str(__lowerCamelCase ) , end=" " )
else:
print("(" , end=" " )
print_optimal_solution(__lowerCamelCase , __lowerCamelCase , optimal_solution[i][j] )
print_optimal_solution(__lowerCamelCase , optimal_solution[i][j] + 1 , __lowerCamelCase )
print(")" , end=" " )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = [30, 35, 15, 5, 10, 20, 25]
lowercase_ = len(__lowerCamelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
lowercase_ , lowercase_ = matrix_chain_order(__lowerCamelCase )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(__lowerCamelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
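# Hypothetical, self-contained restatement of the recurrence above,
# m[a][b] = min over c of m[a][c] + m[c + 1][b] + d[a - 1] * d[c] * d[b];
# 15125 is the known optimum for the demo dimensions.
def mco(d):
    n = len(d)
    m = [[0] * n for _ in range(n)]
    for length in range(2, n):
        for a in range(1, n - length + 1):
            b = a + length - 1
            m[a][b] = min(
                m[a][c] + m[c + 1][b] + d[a - 1] * d[c] * d[b] for c in range(a, b)
            )
    return m
assert mco([30, 35, 15, 5, 10, 20, 25])[1][6] == 15125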
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = "trajectory_transformer"
lowerCAmelCase__ = ["past_key_values"]
lowerCAmelCase__ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , UpperCAmelCase=100 , UpperCAmelCase=5 , UpperCAmelCase=1 , UpperCAmelCase=1 , UpperCAmelCase=249 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=25 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase=128 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0006 , UpperCAmelCase=512 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=1 , UpperCAmelCase=True , UpperCAmelCase=1 , UpperCAmelCase=50256 , UpperCAmelCase=50256 , **UpperCAmelCase , ) -> str:
'''simple docstring'''
lowercase_ = vocab_size
lowercase_ = action_weight
lowercase_ = reward_weight
lowercase_ = value_weight
lowercase_ = max_position_embeddings
lowercase_ = block_size
lowercase_ = action_dim
lowercase_ = observation_dim
lowercase_ = transition_dim
lowercase_ = learning_rate
lowercase_ = n_layer
lowercase_ = n_head
lowercase_ = n_embd
lowercase_ = embd_pdrop
lowercase_ = attn_pdrop
lowercase_ = resid_pdrop
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = kaiming_initializer_range
lowercase_ = use_cache
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float ):
'''simple docstring'''
return 10 - x * x
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float , __lowerCamelCase: float ):
'''simple docstring'''
if equation(__lowerCamelCase ) * equation(__lowerCamelCase ) >= 0:
raise ValueError("Wrong space!" )
lowercase_ = a
while (b - a) >= 0.01:
# Find middle point
lowercase_ = (a + b) / 2
# Check if middle point is root
if equation(__lowerCamelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(__lowerCamelCase ) * equation(__lowerCamelCase ) < 0:
lowercase_ = c
else:
lowercase_ = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
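# Hypothetical, self-contained restatement of the bisection loop above for
# f(x) = 10 - x*x, whose positive root is sqrt(10) ~ 3.1623.
import math
def f(x):
    return 10 - x * x
a, b = 0.0, 6.0
while b - a >= 0.01:
    c = (a + b) / 2
    if f(c) == 0.0:
        break
    if f(c) * f(a) < 0:
        b = c
    else:
        a = c
assert abs(c - math.sqrt(10)) < 0.01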
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: str = "laptop" ):
'''simple docstring'''
lowercase_ = F'https://www.amazon.in/laptop/s?k={product}'
lowercase_ = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
lowercase_ = BeautifulSoup(requests.get(__lowerCamelCase , headers=__lowerCamelCase ).text , "html.parser" )
# Initialize a Pandas dataframe with the column titles
lowercase_ = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
lowercase_ = item.h2.text
lowercase_ = "https://www.amazon.in/" + item.h2.a["href"]
lowercase_ = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
lowercase_ = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
lowercase_ = "Not available"
try:
lowercase_ = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
lowercase_ = ""
try:
lowercase_ = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
lowercase_ = float("nan" )
except AttributeError:
pass
lowercase_ = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowercase_ = " "
lowercase_ = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = """headphones"""
get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""")
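# Hypothetical restatement of the discount formula used above on plain
# floats: discount% = (MRP - price) / MRP * 100.
mrp, price = 1000.0, 750.0
assert (mrp - price) / mrp * 100 == 25.0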
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any ):
'''simple docstring'''
with open(__lowerCamelCase , "r" ) as f:
lowercase_ = f.read().splitlines()
return [l.strip() for l in lines]
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="<eos>" , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = load_vocab_file(UpperCAmelCase )
lowercase_ = dict(enumerate(self.all_tokens ) )
lowercase_ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase_ = unk_token
lowercase_ = cls_token
lowercase_ = pad_token
lowercase_ = mask_token
lowercase_ = eos_token
lowercase_ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return text.split()
def A__ ( self , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
return len(self._id_to_token )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.cls_token_id]
lowercase_ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase_ = [1] + ([0] * len(UpperCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(UpperCAmelCase ) + [1]
return mask
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = os.path.join(UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(UpperCAmelCase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> int:
'''simple docstring'''
return super()._add_tokens(UpperCAmelCase , special_tokens=UpperCAmelCase )
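# Hypothetical illustration (made-up ids) of the special-token layout built
# by the method above that adds special tokens: a single sequence becomes
# [CLS] a [EOS]; a pair becomes [CLS] a [EOS] b [EOS], since ESM has no SEP.
cls_id, eos_id = 0, 2
seq_a, seq_b = [5, 6], [7]
assert [cls_id] + seq_a + [eos_id] == [0, 5, 6, 2]
assert [cls_id] + seq_a + [eos_id] + seq_b + [eos_id] == [0, 5, 6, 2, 7, 2]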
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = []
lowercase_ , lowercase_ = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
lowercase_ = result + left + right
return input_list
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list ):
'''simple docstring'''
if len(__lowerCamelCase ) <= 1:
return input_list
lowercase_ = list(__lowerCamelCase )
# iteration for two-way merging
lowercase_ = 2
while p <= len(__lowerCamelCase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ):
lowercase_ = i
lowercase_ = i + p - 1
lowercase_ = (low + high + 1) // 2
lowercase_ = merge(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# final merge of last two parts
if p * 2 >= len(__lowerCamelCase ):
lowercase_ = i
lowercase_ = merge(__lowerCamelCase , 0 , __lowerCamelCase , len(__lowerCamelCase ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
SCREAMING_SNAKE_CASE__ = []
else:
SCREAMING_SNAKE_CASE__ = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
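# Hypothetical, self-contained restatement of one two-way merge step of the
# kind performed above, independent of the in-place slice bookkeeping.
def merge_two(left, right):
    out = []
    while left and right:
        out.append((left if left[0] <= right[0] else right).pop(0))
    return out + left + right
assert merge_two([1, 5, 9], [2, 7]) == [1, 2, 5, 7, 9]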
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1: A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2: The same as Example 1, but also returning the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> int:
'''simple docstring'''
if return_pvalue:
lowercase_ = pearsonr(UpperCAmelCase , UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCAmelCase , UpperCAmelCase )[0] )}
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = []
create_all_state(1 , __lowerCamelCase , __lowerCamelCase , [] , __lowerCamelCase )
return result
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: list[int] , __lowerCamelCase: list[list[int]] , ):
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__lowerCamelCase , total_number - level + 2 ):
current_list.append(__lowerCamelCase )
create_all_state(i + 1 , __lowerCamelCase , level - 1 , __lowerCamelCase , __lowerCamelCase )
current_list.pop()
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: list[list[int]] ):
'''simple docstring'''
for i in total_list:
print(*__lowerCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = generate_all_combinations(n, k)
print_all_state(total_list)
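# Hypothetical cross-check with itertools: the backtracking above enumerates
# the same k-subsets of {1..n} in the same lexicographic order.
from itertools import combinations
assert [list(c) for c in combinations(range(1, 5), 2)] == [
    [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4],
]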
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(UpperCAmelCase )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(UpperCAmelCase ):
self.assertDictEqual(UpperCAmelCase , example_records[i] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
lowercase_ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def A__ ( self ) -> Any: # checks what happens with missing columns
'''simple docstring'''
lowercase_ = [{"col_1": 1}, {"col_2": "x"}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def A__ ( self ) -> List[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
lowercase_ = [{"col_1": []}, {"col_1": [1, 2]}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = Dataset.from_list([] )
self.assertEqual(len(UpperCAmelCase ) , 0 )
self.assertListEqual(dset.column_names , [] )
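# Hypothetical standalone use of the API exercised by the tests above.
from datasets import Dataset
ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
assert ds.column_names == ["col_1", "col_2"]
assert ds[0] == {"col_1": 3, "col_2": "a"}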
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=30 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=10 , UpperCAmelCase=0.02 , UpperCAmelCase=None , UpperCAmelCase=2 , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 1
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = ViTModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = ViTForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = ViTForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = ViTForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = ViTForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = ViTModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def A__ ( self ) -> Any:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = ViTModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> Any:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
# verify the logits
lowercase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = ViTModel.from_pretrained("facebook/dino-vits8" ).to(UpperCAmelCase )
lowercase_ = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" )
lowercase_ = inputs.pixel_values.to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(UpperCAmelCase , interpolate_pos_encoding=UpperCAmelCase )
# verify the logits
lowercase_ = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase )
lowercase_ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" )
lowercase_ = inputs.pixel_values.to(UpperCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ = model(UpperCAmelCase )
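# Hypothetical minimal inference mirroring the slow test above; it needs
# network access for the checkpoint and the local COCO test fixture image.
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])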
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
lowercase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase_ = DDPMScheduler()
lowercase_ = AudioDiffusionPipeline(vqvae=UpperCAmelCase , unet=self.dummy_unet , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 , return_dict=UpperCAmelCase )
lowercase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase_ = DDIMScheduler()
lowercase_ = self.dummy_vqvae_and_unet
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(raw_audio=UpperCAmelCase , generator=UpperCAmelCase , start_step=5 , steps=10 )
lowercase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase_ = self.dummy_unet_condition
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = torch.rand((1, 1, 10) )
lowercase_ = pipe(generator=UpperCAmelCase , encoding=UpperCAmelCase )
lowercase_ = output.images[0]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device
lowercase_ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
from decimal import Decimal, getcontext
from math import ceil, factorial
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
'''simple docstring'''
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
lowercase_ = precision
lowercase_ = ceil(precision / 14 )
lowercase_ = 426_880 * Decimal(10_005 ).sqrt()
lowercase_ = 1
lowercase_ = 13_591_409
lowercase_ = Decimal(__lowerCamelCase )
for k in range(1 , __lowerCamelCase ):
lowercase_ = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = 5_0
print(f"""The first {n} digits of pi is: {pi(n)}""")
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
SCREAMING_SNAKE_CASE__ = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableDiffusionSAGPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowercase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase_ = CLIPTextModel(UpperCAmelCase )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 ) -> Dict:
'''simple docstring'''
if str(UpperCAmelCase ).startswith("mps" ):
lowercase_ = torch.manual_seed(UpperCAmelCase )
else:
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase_ = {
"prompt": ".",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 1.0,
"sag_scale": 1.0,
"output_type": "numpy",
}
return inputs
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
lowercase_ = sag_pipe.to(UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "."
lowercase_ = torch.manual_seed(0 )
lowercase_ = sag_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowercase_ = sag_pipe.to(UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "."
lowercase_ = torch.manual_seed(0 )
lowercase_ = sag_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowercase_ = sag_pipe.to(UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "."
lowercase_ = torch.manual_seed(0 )
lowercase_ = sag_pipe(
[prompt] , width=768 , height=512 , generator=UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
lowercase_ = output.images
assert image.shape == (1, 512, 768, 3)
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[32, 64, 128] , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2"] , UpperCAmelCase=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = embed_dim
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
lowercase_ = mlp_ratio
lowercase_ = qkv_bias
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = drop_path_rate
lowercase_ = hidden_act
lowercase_ = use_absolute_embeddings
lowercase_ = patch_norm
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = is_training
lowercase_ = scope
lowercase_ = use_labels
lowercase_ = type_sequence_label_size
lowercase_ = encoder_stride
lowercase_ = out_features
lowercase_ = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = FocalNetModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
lowercase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = FocalNetForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 , has_text_modality=UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.hidden_states
lowercase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# FocalNet has a different seq_length
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = reshaped_hidden_states[0].shape
lowercase_ = (
reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
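# Usage sketch (illustrative, not part of the test suite): pulling multi-scale feature maps
# out of a FocalNet backbone, e.g. to feed a detection neck. The `out_indices` value and the
# 224x224 dummy input are assumptions, not something the tests above pin down.
#
#   from transformers import FocalNetBackbone
#   backbone = FocalNetBackbone.from_pretrained("microsoft/focalnet-tiny", out_indices=[1, 2, 3, 4])
#   pixel_values = torch.randn(1, 3, 224, 224)
#   with torch.no_grad():
#       feature_maps = backbone(pixel_values).feature_maps  # one (B, C, H, W) tensor per stage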
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
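# Illustrative sketch (not part of the original script): what `align_predictions` does.
# Positions whose label id equals nn.CrossEntropyLoss().ignore_index (-100) -- padding and
# special tokens -- are dropped before seqeval computes precision/recall/F1. The two-label
# map below is a made-up example.
#
#   label_map = {0: "O", 1: "B-PER"}
#   predictions = np.array([[[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]]])  # (batch, seq_len, num_labels)
#   label_ids = np.array([[0, 1, -100]])
#   preds = np.argmax(predictions, axis=2)                          # -> [[0, 1, 0]]
#   # after filtering the -100 position, preds_list == [["O", "B-PER"]]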
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
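# Usage sketch (illustrative, not part of the module): round-tripping text, plus the effect of
# `add_prefix_space` on pretokenized input, which the `_batch_encode_plus` guard above enforces.
#
#   from transformers import GPT2TokenizerFast
#   tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   tok(["Hello", "world"], is_split_into_words=True)      # allowed because add_prefix_space=True
#   tok.decode(tok("Hello world")["input_ids"])            # -> 'Hello world'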
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
SCREAMING_SNAKE_CASE__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
SCREAMING_SNAKE_CASE__ = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
SCREAMING_SNAKE_CASE__ = """FOBHMDKEXQNRAULPGSJVTYICZW"""
SCREAMING_SNAKE_CASE__ = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
SCREAMING_SNAKE_CASE__ = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
SCREAMING_SNAKE_CASE__ = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
SCREAMING_SNAKE_CASE__ = """SGLCPQWZHKXAREONTFBVIYJUDM"""
SCREAMING_SNAKE_CASE__ = """HVSICLTYKQUBXDWAJZOMFGPREN"""
SCREAMING_SNAKE_CASE__ = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
SCREAMING_SNAKE_CASE__ = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
SCREAMING_SNAKE_CASE__ = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Checks if the values can be used for the enigma function."""
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}
    # str.replace is not in-place, so the result must be kept
    pbstring = pbstring.replace(" ", "")
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Creates the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)
    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)  # any three distinct rotors work
    en = enigma(message, rotor_pos, rotor_sel, pb)
    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
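# Example invocation (illustrative; the script name and all paths are placeholders you must supply):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention  # pass this flag for umt5-style scaled attention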
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
def get_1s_count(number: int) -> int:
    """Count the set bits ('1' digits) in the binary representation of a non-negative integer."""
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
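# Worked example (illustrative): 25 is 0b11001, so it contains three set bits.
#
#   >>> get_1s_count(25)
#   3
#   >>> get_1s_count(0)
#   0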
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
snake_case_ , R"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
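# Usage sketch (illustrative, not part of the module): the postprocessing above applies
# sigmoid for multi-label / single-logit heads and softmax for single-label multi-class
# heads. The checkpoint name is the usual sentiment demo model, assumed here.
#
#   from transformers import pipeline
#   classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#   classifier("I love this!")               # [{'label': 'POSITIVE', 'score': ...}]
#   classifier("I love this!", top_k=None)   # scores for every label, softmax-normalized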
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """
    sample (`torch.FloatTensor` of shape `(batch_size * num_frames, num_channels, height, width)`):
        Hidden states conditioned on the `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """Transformer model for video-like data."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
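# Shape-flow sketch (illustrative): temporal attention treats each spatial location as a
# sequence of `num_frames` tokens, so a (batch * frames, C, H, W) latent goes in and comes
# back out with the same shape. The toy sizes below are assumptions for a quick smoke test.
#
#   model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=16, in_channels=32)
#   x = torch.randn(2 * 4, 32, 8, 8)          # batch_size=2, num_frames=4
#   out = model(x, num_frames=4).sample       # same (8, 32, 8, 8) shape as the input
#   assert out.shape == x.shape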
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Returns default number of steps recommended for inference."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
lowercase_ = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase )
lowercase_ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowercase_ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowercase_ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase , device=self.device , )
lowercase_ = noise
lowercase_ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase , UpperCAmelCase )
lowercase_ = self.mel.audio_slice_to_image(UpperCAmelCase )
lowercase_ = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
lowercase_ = (input_image / 255) * 2 - 1
lowercase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowercase_ = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase , 0 ) ).latent_dist.sample(
generator=UpperCAmelCase )[0]
lowercase_ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowercase_ = self.scheduler.add_noise(UpperCAmelCase , UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
lowercase_ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowercase_ = int(mask_start_secs * pixels_per_second )
lowercase_ = int(mask_end_secs * pixels_per_second )
lowercase_ = self.scheduler.add_noise(UpperCAmelCase , UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UpperCAmelCase ):
lowercase_ = self.unet(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )["sample"]
else:
lowercase_ = self.unet(UpperCAmelCase , UpperCAmelCase )["sample"]
if isinstance(self.scheduler , UpperCAmelCase ):
lowercase_ = self.scheduler.step(
model_output=UpperCAmelCase , timestep=UpperCAmelCase , sample=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , )["prev_sample"]
else:
lowercase_ = self.scheduler.step(
model_output=UpperCAmelCase , timestep=UpperCAmelCase , sample=UpperCAmelCase , generator=UpperCAmelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
lowercase_ = mask[:, step, :, :mask_start]
if mask_end > 0:
lowercase_ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowercase_ = 1 / self.vqvae.config.scaling_factor * images
lowercase_ = self.vqvae.decode(UpperCAmelCase )["sample"]
lowercase_ = (images / 2 + 0.5).clamp(0 , 1 )
lowercase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowercase_ = (images * 255).round().astype("uint8" )
lowercase_ = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCAmelCase , mode="RGB" ).convert("L" ) for _ in images) )
lowercase_ = [self.mel.image_to_audio(UpperCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCAmelCase ) )
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising step process: recover noisy image from generated image."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical Linear intERPolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
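# Usage sketch (illustrative): `slerp` interpolates two noise tensors along a great circle,
# which keeps the result's statistics closer to Gaussian noise than a straight linear mix.
#
#   x0, x1 = torch.randn(1, 1, 256, 256), torch.randn(1, 1, 256, 256)
#   halfway = AudioDiffusionPipeline.slerp(x0, x1, 0.5)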
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n] = `sample`."""
        return 0.0


def get_bounds(
    fft_results: np.ndarray, samplerate: int
) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show frequency response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
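# A minimal sketch of a concrete FilterType: a two-tap moving-average FIR low-pass. Any class
# with a matching `process` method satisfies the Protocol, so it can be handed straight to
# show_frequency_response / show_phase_response. (The class itself is illustrative.)
class MovingAverageFilter:
    def __init__(self) -> None:
        self.prev_sample = 0.0

    def process(self, sample: float) -> float:
        # y[n] = (x[n] + x[n-1]) / 2
        result = (sample + self.prev_sample) / 2
        self.prev_sample = sample
        return result


if __name__ == "__main__":
    show_frequency_response(MovingAverageFilter(), 48000)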
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """A pangram contains every letter of the alphabet at least once."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
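# Quick check (illustrative): all three implementations agree on a non-pangram.
#
#   >>> is_pangram("My name is Unknown")
#   False
#   >>> is_pangram_faster("My name is Unknown") == is_pangram_fastest("My name is Unknown")
#   True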
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """
        `str`: Mask token, to use when training a model with masked-language modeling. Logs an error if used while
        not having been set.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
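# Hedged usage sketch (added for illustration, not part of the original file). It
# assumes the de-obfuscated class name `MvpTokenizerFast` and that the "RUCAIBox/mvp"
# checkpoint referenced above is reachable on the Hub. Special tokens follow the
# BART scheme implemented in `build_inputs_with_special_tokens`: `<s> A </s>` for a
# single sequence, `<s> A </s></s> B </s>` for a pair.
if __name__ == "__main__":
    from transformers import MvpTokenizerFast

    tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    single = tokenizer.build_inputs_with_special_tokens([10, 11])
    pair = tokenizer.build_inputs_with_special_tokens([10, 11], [12])
    print(single)  # [bos_id, 10, 11, eos_id]
    print(pair)    # [bos_id, 10, 11, eos_id, eos_id, 12, eos_id]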
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
SCREAMING_SNAKE_CASE__ = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
SCREAMING_SNAKE_CASE__ = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference (or a set of references), typically human-produced summaries or translations.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
SCREAMING_SNAKE_CASE__ = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
    use_stemmer: Bool indicating whether a Porter stemmer should be used to strip word suffixes.
    use_aggregator: Return aggregated scores if this is set to True.
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
if rouge_types is None:
lowercase_ = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
lowercase_ = rouge_scorer.RougeScorer(rouge_types=UpperCAmelCase , use_stemmer=UpperCAmelCase )
if use_aggregator:
lowercase_ = scoring.BootstrapAggregator()
else:
lowercase_ = []
for ref, pred in zip(UpperCAmelCase , UpperCAmelCase ):
lowercase_ = scorer.score(UpperCAmelCase , UpperCAmelCase )
if use_aggregator:
aggregator.add_scores(UpperCAmelCase )
else:
scores.append(UpperCAmelCase )
if use_aggregator:
lowercase_ = aggregator.aggregate()
else:
lowercase_ = {}
for key in scores[0]:
lowercase_ = [score[key] for score in scores]
return result
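# Hedged sketch (added for illustration): exercising the underlying `rouge_score`
# API that the metric above wraps, without going through `datasets.load_metric`.
# Both names are already imported at the top of this file.
if __name__ == "__main__":
    scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
    aggregator = scoring.BootstrapAggregator()
    for ref, pred in zip(["hello there"], ["hello there"]):
        aggregator.add_scores(scorer.score(ref, pred))
    result = aggregator.aggregate()
    print(result["rouge1"].mid.fmeasure)  # 1.0 for an exact match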
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 32
lowercase_ = embedder_hidden_size
# image encoding components
lowercase_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowercase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase , projection_dim=UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowercase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase )
lowercase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase , layers_per_block=1 , upcast_attention=UpperCAmelCase , use_linear_projection=UpperCAmelCase , )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL()
lowercase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=True ) -> Tuple:
'''simple docstring'''
if str(UpperCAmelCase ).startswith("mps" ):
lowercase_ = torch.manual_seed(UpperCAmelCase )
else:
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if pil_image:
lowercase_ = input_image * 0.5 + 0.5
lowercase_ = input_image.clamp(0 , 1 )
lowercase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ = DiffusionPipeline.numpy_to_pil(UpperCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableUnCLIPImgaImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
inputs.update({"image_embeds": None} )
lowercase_ = sd_pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
        lowercase_ = pipe(UpperCAmelCase , "anime turtle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
        lowercase_ = pipe(UpperCAmelCase , "anime turtle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = pipe(
UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
lowercase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any]=False ):
'''simple docstring'''
if isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = len(set_a.intersection(__lowerCamelCase ) )
if alternative_union:
lowercase_ = len(__lowerCamelCase ) + len(__lowerCamelCase )
else:
lowercase_ = len(set_a.union(__lowerCamelCase ) )
return intersection / union
if isinstance(__lowerCamelCase , (list, tuple) ) and isinstance(__lowerCamelCase , (list, tuple) ):
lowercase_ = [element for element in set_a if element in set_b]
if alternative_union:
lowercase_ = len(__lowerCamelCase ) + len(__lowerCamelCase )
return len(__lowerCamelCase ) / union
else:
lowercase_ = set_a + [element for element in set_b if element not in set_a]
return len(__lowerCamelCase ) / len(__lowerCamelCase )
return None
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = {"""a""", """b""", """c""", """d""", """e"""}
SCREAMING_SNAKE_CASE__ = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
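    # Hedged extra check (added for illustration; assumes the de-obfuscated signature
    # jaccard_similarity(set_a, set_b, alternative_union=False)): with the alternative
    # union the denominator is len(set_a) + len(set_b), so the result here is
    # |{c, d, e}| / (5 + 6) = 3 / 11 ≈ 0.2727 instead of 3 / 8 = 0.375.
    print(jaccard_similarity(set_a, set_b, True))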
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase_ = 1.0 if scale is None else scale
lowercase_ = 0.0 if loc is None else loc
super().__init__(UpperCAmelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=UpperCAmelCase )] )
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def A__ ( self ) -> str:
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return self.variance.sqrt()
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = args_dim
lowercase_ = nn.ModuleList([nn.Linear(UpperCAmelCase , UpperCAmelCase ) for dim in args_dim.values()] )
lowercase_ = domain_map
def A__ ( self , UpperCAmelCase ) -> Tuple[torch.Tensor]:
'''simple docstring'''
lowercase_ = [proj(UpperCAmelCase ) for proj in self.proj]
return self.domain_map(*UpperCAmelCase )
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase_ = function
def A__ ( self , UpperCAmelCase , *UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return self.function(UpperCAmelCase , *UpperCAmelCase )
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , UpperCAmelCase = 1 ) -> None:
'''simple docstring'''
lowercase_ = dim
lowercase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*UpperCAmelCase )
else:
return Independent(self.distribution_class(*UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Distribution:
'''simple docstring'''
lowercase_ = self._base_distribution(UpperCAmelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(UpperCAmelCase , loc=UpperCAmelCase , scale=UpperCAmelCase , event_dim=self.event_dim )
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def A__ ( self ) -> float:
'''simple docstring'''
return 0.0
def A__ ( self , UpperCAmelCase ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=UpperCAmelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def A__ ( self , *UpperCAmelCase ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def A__ ( UpperCAmelCase ) -> torch.Tensor:
'''simple docstring'''
return (x + torch.sqrt(torch.square(UpperCAmelCase ) + 4.0 )) / 2.0
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase__ = StudentT
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
lowercase_ = 2.0 + cls.squareplus(UpperCAmelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"loc": 1, "scale": 1}
lowerCAmelCase__ = Normal
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"total_count": 1, "logits": 1}
lowerCAmelCase__ = NegativeBinomial
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def A__ ( self , UpperCAmelCase ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase )
else:
return Independent(self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
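# Hedged sketch (added for illustration). The names below follow the de-obfuscated
# upstream API (`StudentTOutput`, `get_parameter_projection`, `distribution`), to
# which the generically named classes and methods above correspond: project network
# features to distribution parameters, then build an affinely scaled StudentT and
# score observations under it.
if __name__ == "__main__":
    output = StudentTOutput(dim=1)  # hypothetical name for the StudentT output class above
    projection = output.get_parameter_projection(in_features=16)
    features = torch.randn(8, 16)
    distr_args = projection(features)  # (df, loc, scale), each of shape (8,)
    distribution = output.distribution(distr_args, loc=torch.zeros(8), scale=torch.ones(8))
    print(distribution.log_prob(torch.zeros(8)).shape)  # torch.Size([8])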
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = data
def __iter__( self ) -> List[str]:
'''simple docstring'''
for element in self.data:
yield element
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any]=True ):
'''simple docstring'''
lowercase_ = Accelerator(even_batches=__lowerCamelCase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: bool = False ):
'''simple docstring'''
if iterable:
lowercase_ = DummyIterableDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
else:
lowercase_ = TensorDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
lowercase_ = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase_ = accelerator.prepare(__lowerCamelCase )
return dl
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: List[int] , __lowerCamelCase: List[int] , ):
'''simple docstring'''
lowercase_ = create_dataloader(accelerator=__lowerCamelCase , dataset_size=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase_ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
lowercase_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowerCamelCase ):
lowercase_ = ddp_model(batch[0].float() )
lowercase_ = output.sum()
loss.backward()
batch_idxs.append(__lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = True
lowercase_ = False
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
lowercase_ = train_dl.batch_sampler.even_batches
lowercase_ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = True
lowercase_ = False
lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
lowercase_ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
lowercase_ = torch.nn.Linear(1 , 1 )
lowercase_ = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
lowercase_ = accelerator.state.distributed_type
lowercase_ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowerCamelCase )
lowercase_ = original_state
if __name__ == "__main__":
main()
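# Reading guide (added for illustration): this script must be launched on two
# processes, e.g. `accelerate launch --num_processes 2 this_script.py`. With
# even_batches=True (the default), a 3-sample dataset at batch_size=1 is padded by
# repeating samples so both ranks see batch sizes [1, 1]; with even_batches=False,
# rank 0 sees [1, 1] and rank 1 sees only [1], as the expectations above verify.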
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = (DPMSolverSinglestepScheduler,)
lowerCAmelCase__ = (("num_inference_steps", 25),)
def A__ ( self , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**UpperCAmelCase )
return config
def A__ ( self , UpperCAmelCase=0 , **UpperCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = dict(self.forward_default_kwargs )
lowercase_ = kwargs.pop("num_inference_steps" , UpperCAmelCase )
lowercase_ = self.dummy_sample
lowercase_ = 0.1 * sample
lowercase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase_ = self.get_scheduler_config(**UpperCAmelCase )
lowercase_ = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
lowercase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
lowercase_ = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
lowercase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase_ , lowercase_ = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
lowercase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
lowercase_ = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def A__ ( self , UpperCAmelCase=0 , **UpperCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = dict(self.forward_default_kwargs )
lowercase_ = kwargs.pop("num_inference_steps" , UpperCAmelCase )
lowercase_ = self.dummy_sample
lowercase_ = 0.1 * sample
lowercase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
lowercase_ = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
lowercase_ = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def A__ ( self , UpperCAmelCase=None , **UpperCAmelCase ) -> str:
'''simple docstring'''
if scheduler is None:
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config(**UpperCAmelCase )
lowercase_ = scheduler_class(**UpperCAmelCase )
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config(**UpperCAmelCase )
lowercase_ = scheduler_class(**UpperCAmelCase )
lowercase_ = 10
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ = model(UpperCAmelCase , UpperCAmelCase )
lowercase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase_ = 50
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
lowercase_ = model(UpperCAmelCase , UpperCAmelCase )
lowercase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
lowercase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1e-3
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase_ = self.full_loop(scheduler=UpperCAmelCase )
lowercase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
lowercase_ = DEISMultistepScheduler.from_config(scheduler.config )
lowercase_ = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase_ = UniPCMultistepScheduler.from_config(scheduler.config )
lowercase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase_ = self.full_loop(scheduler=UpperCAmelCase )
lowercase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type="dpmsolver++" , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
lowercase_ = self.full_loop(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def A__ ( self ) -> Any:
'''simple docstring'''
self.check_over_configs(lower_order_final=UpperCAmelCase )
self.check_over_configs(lower_order_final=UpperCAmelCase )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def A__ ( self ) -> int:
'''simple docstring'''
self.check_over_configs(variance_type=UpperCAmelCase )
self.check_over_configs(variance_type="learned_range" )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.full_loop()
lowercase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.full_loop(use_karras_sigmas=UpperCAmelCase )
lowercase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1e-3
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.full_loop(prediction_type="v_prediction" )
lowercase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1e-3
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=UpperCAmelCase )
lowercase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1e-3
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
lowercase_ = scheduler_class(**UpperCAmelCase )
lowercase_ = 10
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ = model(UpperCAmelCase , UpperCAmelCase )
lowercase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
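# Hedged standalone sketch (added for illustration): a minimal denoising loop with
# the scheduler under test, using a zero tensor as a stand-in for a real UNet call.
if __name__ == "__main__":
    standalone_scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
    standalone_scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in standalone_scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = standalone_scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])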
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 1
lowercase_ = 3
lowercase_ = (32, 32)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
return image
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase )
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
def extract(*UpperCAmelCase , **UpperCAmelCase ):
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = torch.ones([0] )
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
self.pixel_values.to(UpperCAmelCase )
return self
return Out()
return extract
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
lowercase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
# put models in fp16
lowercase_ = unet.half()
lowercase_ = vae.half()
lowercase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ = init_image.resize((760, 504) )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
lowercase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase_ = init_image.resize((768, 512) )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
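# Hedged usage sketch (added for illustration; in released diffusers the class is
# spelled `AltDiffusionImg2ImgPipeline`, while the `ImgaImg` spelling above follows
# this file's naming convention). Running it downloads the "BAAI/AltDiffusion" weights:
#
#   pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
#   out = pipe(prompt="A fantasy landscape, trending on artstation",
#              image=init_image, strength=0.75, guidance_scale=7.5)
#   out.images[0].save("fantasy_landscape.png")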
from __future__ import annotations
from math import pi, sqrt
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float , __lowerCamelCase: float ):
'''simple docstring'''
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
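# Worked example (added for illustration; assumes the de-obfuscated name
# resonant_frequency): the function implements f = 1 / (2 * pi * sqrt(L * C)).
# With L = 10 mH and C = 5 uF: f = 1 / (2 * pi * sqrt(0.01 * 5e-6)) ≈ 711.76 Hz,
# so resonant_frequency(0.01, 5e-6) returns ("Resonant frequency", 711.76...).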
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=23 , UpperCAmelCase=11 , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = act_dim
lowercase_ = state_dim
lowercase_ = hidden_size
lowercase_ = max_length
lowercase_ = is_training
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowercase_ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DecisionTransformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCAmelCase__ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = DecisionTransformerModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DecisionTransformerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = 2 # number of steps of autoregressive prediction we will perform
lowercase_ = 10 # defined by the RL environment, may be normalized
lowercase_ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
lowercase_ = model.to(UpperCAmelCase )
lowercase_ = model.config
torch.manual_seed(0 )
lowercase_ = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ) # env.reset()
lowercase_ = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=UpperCAmelCase )
lowercase_ = torch.tensor(UpperCAmelCase , device=UpperCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowercase_ = state
lowercase_ = torch.zeros(1 , 0 , config.act_dim , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.zeros(1 , 0 , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.tensor(0 , device=UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCAmelCase ):
lowercase_ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase_ , lowercase_ , lowercase_ = model(
states=UpperCAmelCase , actions=UpperCAmelCase , rewards=UpperCAmelCase , returns_to_go=UpperCAmelCase , timesteps=UpperCAmelCase , attention_mask=UpperCAmelCase , return_dict=UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase_ = action_pred[0, -1]
lowercase_ = torch.cat([states, state] , dim=1 )
lowercase_ = returns_to_go[0, -1] - reward
lowercase_ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase_ = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
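# Note (added for clarity): the Decision Transformer interleaves three token types
# per environment step, (return-to-go, state, action), which is why the hidden
# states checked earlier have sequence length seq_length * 3, and why the loop above
# appends zero action/reward placeholders before each forward pass and then reads
# the newly predicted action from the last position of `action_pred`.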
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: bytes , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = F'{sampling_rate}'
lowercase_ = "1"
lowercase_ = "f32le"
lowercase_ = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(__lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowercase_ = ffmpeg_process.communicate(__lowerCamelCase )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
lowercase_ = output_stream[0]
lowercase_ = np.frombuffer(__lowerCamelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
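# Hedged usage sketch (added for illustration; assumes the de-obfuscated name
# `ffmpeg_read` and a working ffmpeg binary on PATH):
#
#   with open("sample.mp3", "rb") as f:
#       audio = ffmpeg_read(f.read(), 16_000)  # np.float32 mono waveform at 16 kHz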
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: float , __lowerCamelCase: str = "f32le" , ):
'''simple docstring'''
lowercase_ = F'{sampling_rate}'
lowercase_ = "1"
if format_for_conversion == "s16le":
lowercase_ = 2
elif format_for_conversion == "f32le":
lowercase_ = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
lowercase_ = platform.system()
if system == "Linux":
lowercase_ = "alsa"
lowercase_ = "default"
elif system == "Darwin":
lowercase_ = "avfoundation"
lowercase_ = ":0"
elif system == "Windows":
lowercase_ = "dshow"
lowercase_ = "default"
lowercase_ = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
lowercase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase_ = _ffmpeg_stream(__lowerCamelCase , __lowerCamelCase )
for item in iterator:
yield item
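# Worked size check: at sampling_rate=16000, chunk_length_s=1.0 and the default "f32le"
# format, chunk_len = round(16000 * 1.0) * 4 = 64000 bytes per yielded chunk, i.e.
# exactly 16000 float32 samples (one second of mono audio).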
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: float , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[Union[Tuple[float, float], float]] = None , __lowerCamelCase: str = "f32le" , ):
'''simple docstring'''
if stream_chunk_s is not None:
lowercase_ = stream_chunk_s
else:
lowercase_ = chunk_length_s
lowercase_ = ffmpeg_microphone(__lowerCamelCase , __lowerCamelCase , format_for_conversion=__lowerCamelCase )
if format_for_conversion == "s16le":
lowercase_ = np.intaa
lowercase_ = 2
elif format_for_conversion == "f32le":
lowercase_ = np.floataa
lowercase_ = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
lowercase_ = chunk_length_s / 6
lowercase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__lowerCamelCase , (int, float) ):
lowercase_ = [stride_length_s, stride_length_s]
lowercase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowercase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowercase_ = datetime.datetime.now()
lowercase_ = datetime.timedelta(seconds=__lowerCamelCase )
for item in chunk_bytes_iter(__lowerCamelCase , __lowerCamelCase , stride=(stride_left, stride_right) , stream=__lowerCamelCase ):
# Put everything back in numpy scale
lowercase_ = np.frombuffer(item["raw"] , dtype=__lowerCamelCase )
lowercase_ = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
lowercase_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: Tuple[int, int] , __lowerCamelCase: bool = False ):
'''simple docstring'''
lowercase_ = B""
lowercase_ , lowercase_ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
lowercase_ = 0
for raw in iterator:
acc += raw
if stream and len(__lowerCamelCase ) < chunk_len:
lowercase_ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowercase_ = (_stride_left, stride_right)
lowercase_ = {"raw": acc[:chunk_len], "stride": stride}
if stream:
lowercase_ = False
yield item
lowercase_ = stride_left
lowercase_ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__lowerCamelCase ) > stride_left:
lowercase_ = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
lowercase_ = False
yield item
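# A minimal sketch of the chunker above (assuming it keeps its original name
# `chunk_bytes_iter`): with chunk_len=4 and stride=(1, 1), the input b"0123456789"
# yields b"0123", b"2345", b"4567", b"6789" and the tail b"89"; consecutive chunks
# overlap by the stride amounts so downstream consumers can discard edge effects.
#
#     for item in chunk_bytes_iter(iter([b"0123456789"]), 4, stride=(1, 1)):
#         print(item["raw"], item["stride"])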
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: int ):
'''simple docstring'''
    lowercase_ = 2**24 # 16 MiB
try:
with subprocess.Popen(__lowerCamelCase , stdout=subprocess.PIPE , bufsize=__lowerCamelCase ) as ffmpeg_process:
while True:
lowercase_ = ffmpeg_process.stdout.read(__lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 297
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 297
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 297
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 1
@register_to_config
def __init__( self , UpperCAmelCase = 1000 , UpperCAmelCase = None ) -> List[Any]:
'''simple docstring'''
self.set_timesteps(UpperCAmelCase )
# standard deviation of the initial noise distribution
lowercase_ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
lowercase_ = 4
# running values
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Optional[int]:
'''simple docstring'''
lowercase_ = num_inference_steps
lowercase_ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
lowercase_ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
lowercase_ = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
lowercase_ = torch.sin(steps * math.pi / 2 ) ** 2
lowercase_ = (1.0 - self.betas**2) ** 0.5
lowercase_ = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
lowercase_ = timesteps.to(UpperCAmelCase )
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
lowercase_ = (self.timesteps == timestep).nonzero().item()
lowercase_ = timestep_index + 1
lowercase_ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCAmelCase )
if len(self.ets ) == 1:
lowercase_ = self.ets[-1]
elif len(self.ets ) == 2:
lowercase_ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowercase_ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
lowercase_ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
lowercase_ = self._get_prev_sample(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = self.alphas[timestep_index]
lowercase_ = self.betas[timestep_index]
lowercase_ = self.alphas[prev_timestep_index]
lowercase_ = self.betas[prev_timestep_index]
lowercase_ = (sample - sigma * ets) / max(UpperCAmelCase , 1e-8 )
lowercase_ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
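# A typical denoising loop for this scheduler (a sketch, assuming a `unet` and an
# initial noise `sample` are available; this mirrors the standard diffusers pattern):
#
#     scheduler.set_timesteps(50, device=sample.device)
#     for t in scheduler.timesteps:
#         model_output = unet(sample, t).sample
#         sample = scheduler.step(model_output, t, sample).prev_sample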
| 297
| 1
|
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = u
for i in range(1 , __lowerCamelCase ):
lowercase_ = temp * (u - i)
return temp
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = int(input("enter the numbers of values: " ) )
lowercase_ = []
for _ in range(__lowerCamelCase ):
y.append([] )
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
y[i].append(__lowerCamelCase )
lowercase_ = 0
print("enter the values of parameters in a list: " )
lowercase_ = list(map(__lowerCamelCase , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(__lowerCamelCase ):
lowercase_ = float(input() )
lowercase_ = int(input("enter the value to interpolate: " ) )
lowercase_ = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , __lowerCamelCase ):
for j in range(n - i ):
lowercase_ = y[j + 1][i - 1] - y[j][i - 1]
lowercase_ = y[0][0]
for i in range(1 , __lowerCamelCase ):
summ += (ucal(__lowerCamelCase , __lowerCamelCase ) * y[0][i]) / math.factorial(__lowerCamelCase )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
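# Worked example of the forward-difference scheme above: for x = [0, 1, 2, 3],
# y = [1, 2, 4, 8] and value = 1.5, u = (1.5 - 0) / (1 - 0) = 1.5 and the forward
# differences of y are [1, 2, 4], [1, 2], [1]. The interpolant evaluates to
#     1 + 1.5*1 + (1.5*0.5/2!)*1 + (1.5*0.5*(-0.5)/3!)*1 = 2.8125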
| 297
|
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , __lowerCamelCase: float , ):
'''simple docstring'''
lowercase_ = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
lowercase_ = 1 - (matter_density + radiation_density + dark_energy)
lowercase_ = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
lowercase_ = hubble_constant * e_a ** (1 / 2)
return hubble
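# The expression above is the Friedmann equation for an FLRW universe:
#     H(z) = H0 * sqrt(Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_L)
# with the curvature density fixed by Omega_k = 1 - Omega_r - Omega_m - Omega_L.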
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
SCREAMING_SNAKE_CASE__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 297
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 297
|
import sys
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
lowercase_ = len(__lowerCamelCase )
lowercase_ = [[0 for x in range(__lowerCamelCase )] for x in range(__lowerCamelCase )]
lowercase_ = [[0 for x in range(__lowerCamelCase )] for x in range(__lowerCamelCase )]
for chain_length in range(2 , __lowerCamelCase ):
for a in range(1 , n - chain_length + 1 ):
lowercase_ = a + chain_length - 1
lowercase_ = sys.maxsize
for c in range(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowercase_ = cost
lowercase_ = c
return matrix, sol
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict ):
'''simple docstring'''
if i == j:
print("A" + str(__lowerCamelCase ) , end=" " )
else:
print("(" , end=" " )
        print_optimal_solution(__lowerCamelCase , __lowerCamelCase , optimal_solution[i][j] )
        print_optimal_solution(__lowerCamelCase , optimal_solution[i][j] + 1 , __lowerCamelCase )
print(")" , end=" " )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = [30, 35, 15, 5, 10, 20, 25]
lowercase_ = len(__lowerCamelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
lowercase_ , lowercase_ = matrix_chain_order(__lowerCamelCase )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(__lowerCamelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
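# For the dimensions [30, 35, 15, 5, 10, 20, 25] used in main() (the classic CLRS
# example), the minimum cost is 15125 scalar multiplications, achieved by the
# parenthesization ((A1(A2A3))((A4A5)A6)).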
| 297
| 1
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
lowerCAmelCase__ = None
SCREAMING_SNAKE_CASE__ = namedtuple("""CoinsDistribResult""", """moves excess""")
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(__lowerCamelCase: TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__lowerCamelCase: TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__lowerCamelCase ) != count_coins(__lowerCamelCase ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(__lowerCamelCase: TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
lowercase_ , lowercase_ = get_distrib(node.left )
lowercase_ , lowercase_ = get_distrib(node.right )
lowercase_ = 1 - left_distrib_excess
lowercase_ = 1 - right_distrib_excess
lowercase_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(__lowerCamelCase )
+ abs(__lowerCamelCase )
)
lowercase_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(__lowerCamelCase , __lowerCamelCase )
return get_distrib(__lowerCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
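# A small worked case (assumption: the dataclass above is the TreeNode referenced in
# the annotations, with fields data/left/right): for TreeNode(3, TreeNode(0),
# TreeNode(0)) -- three coins at the root, two empty leaves -- the minimum is 2 moves,
# one coin travelling from the root to each leaf.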
| 297
|
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float ):
'''simple docstring'''
return 10 - x * x
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: float , __lowerCamelCase: float ):
'''simple docstring'''
if equation(__lowerCamelCase ) * equation(__lowerCamelCase ) >= 0:
raise ValueError("Wrong space!" )
lowercase_ = a
while (b - a) >= 0.01:
# Find middle point
lowercase_ = (a + b) / 2
# Check if middle point is root
if equation(__lowerCamelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(__lowerCamelCase ) * equation(__lowerCamelCase ) < 0:
lowercase_ = c
else:
lowercase_ = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
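# Both demo calls bracket the positive root of 10 - x**2, so each bisection converges
# to sqrt(10) ~= 3.162 within the 0.01 interval tolerance.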
| 297
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/spiece.model""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""google/fnet-base""": 5_1_2,
"""google/fnet-large""": 5_1_2,
}
SCREAMING_SNAKE_CASE__ = """▁"""
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "token_type_ids"]
lowerCAmelCase__ = FNetTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="<unk>" , UpperCAmelCase="[SEP]" , UpperCAmelCase="<pad>" , UpperCAmelCase="[CLS]" , UpperCAmelCase="[MASK]" , **UpperCAmelCase , ) -> str:
'''simple docstring'''
lowercase_ = (
AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase , normalized=UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase )
else mask_token
)
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , remove_space=UpperCAmelCase , keep_accents=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = do_lower_case
lowercase_ = remove_space
lowercase_ = keep_accents
lowercase_ = vocab_file
lowercase_ = False if not self.vocab_file else True
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase_ = os.path.join(
UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
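# A minimal usage sketch (assumption: this class is published as FNetTokenizerFast,
# matching the checkpoints referenced above):
#
#     tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
#     ids = tok("Hello world")["input_ids"]  # includes [CLS] and [SEP] ids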
| 297
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any ):
'''simple docstring'''
with open(__lowerCamelCase , "r" ) as f:
lowercase_ = f.read().splitlines()
return [l.strip() for l in lines]
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="<eos>" , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = load_vocab_file(UpperCAmelCase )
lowercase_ = dict(enumerate(self.all_tokens ) )
lowercase_ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase_ = unk_token
lowercase_ = cls_token
lowercase_ = pad_token
lowercase_ = mask_token
lowercase_ = eos_token
lowercase_ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return text.split()
def A__ ( self , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
return len(self._id_to_token )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.cls_token_id]
lowercase_ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase_ = [1] + ([0] * len(UpperCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(UpperCAmelCase ) + [1]
return mask
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = os.path.join(UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(UpperCAmelCase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> int:
'''simple docstring'''
return super()._add_tokens(UpperCAmelCase , special_tokens=UpperCAmelCase )
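# A minimal usage sketch (assumption: this class is published as EsmTokenizer, matching
# the checkpoints referenced above):
#
#     tok = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     ids = tok("MKTVRQ")["input_ids"]  # <cls> + one id per residue + <eos>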
| 297
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self , UpperCAmelCase = 1 , UpperCAmelCase = 2000 , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , **UpperCAmelCase , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
lowercase_ = self.unet.config.sample_size
lowercase_ = (batch_size, 3, img_size, img_size)
lowercase_ = self.unet
lowercase_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase ) * self.scheduler.init_noise_sigma
lowercase_ = sample.to(self.device )
self.scheduler.set_timesteps(UpperCAmelCase )
self.scheduler.set_sigmas(UpperCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase_ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
lowercase_ = self.scheduler.step_correct(UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# prediction step
lowercase_ = model(UpperCAmelCase , UpperCAmelCase ).sample
lowercase_ = self.scheduler.step_pred(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase )
lowercase_ , lowercase_ = output.prev_sample, output.prev_sample_mean
lowercase_ = sample_mean.clamp(0 , 1 )
lowercase_ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase_ = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=UpperCAmelCase )
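# A minimal usage sketch (assumption: this pipeline is published as ScoreSdeVePipeline;
# the checkpoint name is illustrative):
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=2000).images[0]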
| 297
|
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> int:
'''simple docstring'''
if return_pvalue:
lowercase_ = pearsonr(UpperCAmelCase , UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCAmelCase , UpperCAmelCase )[0] )}
| 297
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 297
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(UpperCAmelCase )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(UpperCAmelCase ):
self.assertDictEqual(UpperCAmelCase , example_records[i] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self._create_example_records()
lowercase_ = Dataset.from_list(UpperCAmelCase )
lowercase_ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def A__ ( self ) -> Any: # checks what happens with missing columns
'''simple docstring'''
lowercase_ = [{"col_1": 1}, {"col_2": "x"}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def A__ ( self ) -> List[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
lowercase_ = [{"col_1": []}, {"col_1": [1, 2]}]
lowercase_ = Dataset.from_list(UpperCAmelCase )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = Dataset.from_list([] )
self.assertEqual(len(UpperCAmelCase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 297
| 1
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
SCREAMING_SNAKE_CASE__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) -> List[str]:
'''simple docstring'''
lowercase_ = None
lowercase_ = os.path.abspath(os.path.join("examples" , "by_feature" ) )
lowercase_ = os.path.abspath("examples" )
for item in os.listdir(UpperCAmelCase ):
if item not in EXCLUDE_EXAMPLES:
lowercase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
if os.path.isfile(UpperCAmelCase ) and ".py" in item_path:
with self.subTest(
tested_script=UpperCAmelCase , feature_script=UpperCAmelCase , tested_section="main()" if parser_only else "training_function()" , ):
lowercase_ = compare_against_test(
os.path.join(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = "\n".join(UpperCAmelCase )
if special_strings is not None:
for string in special_strings:
lowercase_ = diff.replace(UpperCAmelCase , "" )
self.assertEqual(UpperCAmelCase , "" )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.one_complete_example("complete_nlp_example.py" , UpperCAmelCase )
self.one_complete_example("complete_nlp_example.py" , UpperCAmelCase )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
lowercase_ = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.one_complete_example("complete_cv_example.py" , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = False
@classmethod
def A__ ( cls ) -> Tuple:
'''simple docstring'''
super().setUpClass()
lowercase_ = tempfile.mkdtemp()
lowercase_ = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
lowercase_ = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def A__ ( cls ) -> List[str]:
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
lowercase_ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
lowercase_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase )
self.assertNotIn("epoch 0:" , UpperCAmelCase )
self.assertIn("epoch 1:" , UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
lowercase_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase )
if torch.cuda.is_available():
lowercase_ = torch.cuda.device_count()
else:
lowercase_ = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , UpperCAmelCase )
self.assertIn("epoch 1:" , UpperCAmelCase )
else:
self.assertIn("epoch 0:" , UpperCAmelCase )
self.assertIn("epoch 1:" , UpperCAmelCase )
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
lowercase_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase )
lowercase_ = re.findall("({.+})" , UpperCAmelCase )
lowercase_ = [r for r in results if "accuracy" in r][-1]
lowercase_ = ast.literal_eval(UpperCAmelCase )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def A__ ( self ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
lowercase_ = F'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase , "tracking" ) ) )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 297
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
lowercase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase_ = DDPMScheduler()
lowercase_ = AudioDiffusionPipeline(vqvae=UpperCAmelCase , unet=self.dummy_unet , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 , return_dict=UpperCAmelCase )
lowercase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase_ = DDIMScheduler()
lowercase_ = self.dummy_vqvae_and_unet
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(raw_audio=UpperCAmelCase , generator=UpperCAmelCase , start_step=5 , steps=10 )
lowercase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase_ = self.dummy_unet_condition
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = torch.rand((1, 1, 10) )
lowercase_ = pipe(generator=UpperCAmelCase , encoding=UpperCAmelCase )
lowercase_ = output.images[0]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device
lowercase_ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 297
| 1