from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel."""
    first_sum = 0.00
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series."""
    sum_r = 0.00
    for index, resistor in enumerate(resistors):
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        sum_r += resistor
    return sum_r
if __name__ == "__main__":
    import doctest

    doctest.testmod()
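    # Minimal usage sketch (added for illustration; the function names follow the
    # de-obfuscated definitions above): two 10-ohm resistors in parallel give
    # 1 / (1/10 + 1/10) = 5.0 ohms, and 5 + 10 ohms in series give 15.0 ohms.
    print(resistor_parallel([10.0, 10.0]))  # 5.0
    print(resistor_series([5.0, 10.0]))  # 15.0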
"""Image processor class for MobileViT (identifier names reconstructed from the obfuscated original)."""
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    flip_channel_order,
    get_resize_output_image_size,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


if is_vision_available():
    import PIL

if is_torch_available():
    import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize so that the shortest edge matches size["shortest_edge"], keeping aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
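# Minimal usage sketch (added; illustrative only — this module uses relative
# imports, so run the equivalent through the installed package):
#
#     from transformers import MobileViTImageProcessor
#     import numpy as np
#     processor = MobileViTImageProcessor()
#     dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#     batch = processor.preprocess(dummy, return_tensors="np")
#     print(batch["pixel_values"].shape)  # expected (1, 3, 256, 256) with the default crop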
"""Project Euler 74: count digit factorial chains with exactly sixty terms."""
from math import factorial

DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Convert number to a string to iterate on its digits and add their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Return the number of chains below number_limit with exactly chain_length elements."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If the chain contains the exact amount of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
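# Worked example (added for illustration; this is the chain from the Project
# Euler problem statement, verifiable with digit_factorial_sum):
#
#     >>> digit_factorial_sum(69)
#     363600
#     >>> digit_factorial_sum(363600)
#     1454
#     >>> digit_factorial_sum(1454)
#     169
#     >>> digit_factorial_sum(169)
#     363601
#
# so the chain starting at 69 has exactly five non-repeating terms before 1454 repeats.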
"""Tests for the Karras VE diffusion pipeline."""
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
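# Standalone sketch (added; mirrors the fast test above and is runnable on CPU):
if __name__ == "__main__":
    torch.manual_seed(0)
    unet = UNet2DModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
    images = pipe(num_inference_steps=2, generator=torch.manual_seed(0), output_type="numpy").images
    print(images.shape)  # expected: (1, 32, 32, 3)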
"""Tests for DatasetInfo and DatasetInfosDict serialization."""
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info: DatasetInfo, tmp_path):
    dataset_info_dir = str(tmp_path)
    dataset_info.write_to_directory(dataset_info_dir)
    reloaded = DatasetInfo.from_directory(dataset_info_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(dataset_info_dir, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict: DatasetInfosDict, tmp_path):
    dataset_infos_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(dataset_infos_dir)
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, "README.md"))
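# Quick round-trip sketch (added; mirrors test_dataset_info_dump_and_reload and
# writes dataset_info.json into the current directory, so run it from a scratch folder):
if __name__ == "__main__":
    info = DatasetInfo(description="foo", dataset_size=42)
    info.write_to_directory(".")
    print(DatasetInfo.from_directory(".") == info)  # expected: True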
"""Project Euler: first ten digits of the sum of the numbers listed in num.txt."""
import os


def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
"""Randomized quicksort: the pivot is chosen at random to avoid worst-case inputs."""
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto-style partition around a[left_index]; returns the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
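# Deterministic sanity check (added; not in the original script): the sort works
# in place and handles duplicates.
#
#     >>> data = [5, 3, 8, 3, 1]
#     >>> quick_sort_random(data, 0, len(data))
#     >>> data
#     [1, 3, 3, 5, 8]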
import random
import unittest

import numpy as np

import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax


if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch


def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values below vocab_size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_pf(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
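# Sketch of how a model test file would plug into the mixin above (added; the
# GPT-2 names are illustrative assumptions, not part of this file):
#
#     class FlaxGPT2GenerationTest(FlaxGenerationTesterMixin, unittest.TestCase):
#         all_generative_model_classes = (FlaxGPT2LMHeadModel,)
#         # `model_tester` is set in setUp to the model's ModelTester, which
#         # provides prepare_config_and_inputs_for_common().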
"""Cross-validation (K-fold) training example built on top of the Accelerate nlp_example script."""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    # Use accelerator.print to print only on the main process.
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
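# Tiny numeric sketch (added for illustration) of the fold ensembling at the end
# of `training_function`: per-fold logits are stacked, summed over folds, divided
# by the number of folds, and argmax'd, i.e. average-logit voting.
#
#     >>> import torch
#     >>> fold_logits = [torch.tensor([[2.0, 1.0]]), torch.tensor([[0.0, 3.0]]), torch.tensor([[2.0, 1.0]])]
#     >>> torch.stack(fold_logits, dim=0).sum(dim=0).div(3).argmax(dim=-1)
#     tensor([1])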
"""ChrF(++) metric, wrapping the sacrebleu implementation."""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF

import datasets

_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
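# Direct sacrebleu equivalent of _compute above (added; uses sacrebleu's own CHRF
# API, which this metric wraps):
#
#     from sacrebleu import CHRF
#     chrf = CHRF(word_order=2)  # word n-grams enabled, i.e. chrF++
#     score = chrf.corpus_score(["hello there"], [["hello there"]])
#     print(score.score)  # 100.0 for an exact match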
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import collections
import os
import re
from pathlib import Path
__UpperCAmelCase : List[str] = "src/transformers"
# Matches is_xxx_available()
__UpperCAmelCase : int = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__UpperCAmelCase : Optional[int] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__UpperCAmelCase : List[Any] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__UpperCAmelCase : List[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__UpperCAmelCase : str = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__UpperCAmelCase : Union[str, Any] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__UpperCAmelCase : Dict = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__UpperCAmelCase : str = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__UpperCAmelCase : str = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__UpperCAmelCase : Any = re.compile(r"^\s*try:")
# Catches a line with else:
__UpperCAmelCase : List[Any] = re.compile(r"^\s*else:")
def a ( SCREAMING_SNAKE_CASE_ : Dict ):
"""simple docstring"""
if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None:
return None
UpperCamelCase : Union[str, Any] = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCamelCase : Tuple = f.readlines()
UpperCamelCase : Tuple = 0
while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCamelCase : List[Any] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
UpperCamelCase : Optional[int] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[int] = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0]
UpperCamelCase : str = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
UpperCamelCase : List[Any] = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
UpperCamelCase : List[str] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
UpperCamelCase : Dict = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCamelCase : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase : Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
UpperCamelCase : str = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None:
UpperCamelCase : Union[str, Any] = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
UpperCamelCase : List[Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None:
UpperCamelCase : str = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
UpperCamelCase : Dict = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 1_2 + '''"''' ):
objects.append(line[1_3:-3] )
line_index += 1
UpperCamelCase : Tuple = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCamelCase : int = []
while (
line_index < len(SCREAMING_SNAKE_CASE_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
UpperCamelCase : Tuple = lines[line_index]
UpperCamelCase : Any = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCamelCase : Any = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE_ ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCamelCase : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase : Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
UpperCamelCase : Optional[Any] = lines[line_index]
UpperCamelCase : str = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
UpperCamelCase : str = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
def find_duplicates(SCREAMING_SNAKE_CASE_ : Any ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCamelCase : Dict = []
for key in import_dict_objects.keys():
UpperCamelCase : Union[str, Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
UpperCamelCase : Dict = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCamelCase : List[str] = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def a ( ):
"""simple docstring"""
UpperCamelCase : Any = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ):
if "__init__.py" in files:
UpperCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , '''__init__.py''' )
UpperCamelCase : Optional[int] = parse_init(SCREAMING_SNAKE_CASE_ )
if objects is not None:
UpperCamelCase : str = analyze_results(*SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
UpperCamelCase : List[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise ValueError('''\n\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def a ( ):
"""simple docstring"""
UpperCamelCase : Dict = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(SCREAMING_SNAKE_CASE_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob('''*.py''' ) ) ) == 0:
continue
UpperCamelCase : List[str] = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : str = short_path.replace(os.path.sep , '''.''' )
submodules.append(SCREAMING_SNAKE_CASE_ )
for fname in files:
if fname == "__init__.py":
continue
UpperCamelCase : Tuple = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : int = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE_ )
return submodules
__UpperCAmelCase : Optional[int] = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def check_submodules():
    """simple docstring"""
    from transformers.utils import direct_transformers_import
    transformers = direct_transformers_import(SCREAMING_SNAKE_CASE_)  # repo path constant, expected to be defined earlier
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-)add them.
    with open(os.path.join(SCREAMING_SNAKE_CASE_, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 315 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_vit'] = ['ViTFeatureExtractor']
_import_structure['image_processing_vit'] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vit'] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_vit'] = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_vit'] = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 129 |
import math
def proth(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        error_msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(error_msg)
    if number < 1:
        error_msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(error_msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers are k * 2**n + 1 with odd k < 2**n; build them blockwise.
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
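# Hedged sanity check: the first Proth numbers (k * 2**n + 1 with odd k < 2**n)
# are 3, 5, 9, 13, 17, 25, and the blockwise builder above should reproduce them.
assert [proth(i) for i in range(1, 7)] == [3, 5, 9, 13, 17, 25]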
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
    value = 0
    try:
        value = proth(number)
    except ValueError:
        print(f"""ValueError: there is no {number}th Proth number""")
        continue
    print(f"""The {number}th Proth number: {value}""")
| 129 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 118 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its known factor to the next free multiple
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: schedule its square and emit it
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
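# Hedged sanity check: the incremental sieve should emit the primes in order.
_demo_gen = sieve()
assert [next(_demo_gen) for _ in range(8)] == [2, 3, 5, 7, 11, 13, 17, 19]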
def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime, as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
| 118 | 1 |
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 | 1 |
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None
def display(tree):  # in-order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)
def depth_of_tree(tree):
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0
def is_full_binary_tree(tree):
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
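# Hedged mini-example: a single leaf is full, a node with one child is not.
_leaf = Node(1)
assert is_full_binary_tree(_leaf)
_leaf.left = Node(2)
assert not is_full_binary_tree(_leaf)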
def main():  # Main function for testing.
    # Example tree (the shape below is illustrative; the original assignments were not recoverable):
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)
if __name__ == "__main__":
    main()
| 327 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer( Trainer ):
def __init__( self , *a , a=None , a=None , a=None , **a) -> List[Any]:
super().__init__(*a , **a)
SCREAMING_SNAKE_CASE = eval_examples
SCREAMING_SNAKE_CASE = post_process_function
SCREAMING_SNAKE_CASE = quant_trainer_args
SCREAMING_SNAKE_CASE = 128 # default number of calibration samples
def SCREAMING_SNAKE_CASE__ ( self , a=None) -> Union[str, Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('Trainer: calibration requires a calib_dataset.')
SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset
SCREAMING_SNAKE_CASE = self._remove_unused_columns(a , description='Calibration')
return DataLoader(
a , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=a , )
def SCREAMING_SNAKE_CASE__ ( self , a=None) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset
SCREAMING_SNAKE_CASE = self.get_calib_dataloader(a)
SCREAMING_SNAKE_CASE = self.model
quant_trainer.configure_model(a , self.quant_trainer_args , calib=a)
model.eval()
quant_trainer.enable_calibration(a)
logger.info('***** Running calibration *****')
logger.info(f''' Num examples = {self.calib_num}''')
logger.info(f''' Batch size = {calib_dataloader.batch_size}''')
for step, inputs in enumerate(a):
# Prediction step
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prediction_step(a , a , prediction_loss_only=a)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(a , self.quant_trainer_args)
SCREAMING_SNAKE_CASE = model
def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a=None , a = "eval") -> str:
SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE = self.get_eval_dataloader(a)
SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE = self.compute_metrics
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE = eval_loop(
a , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a , )
finally:
SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
SCREAMING_SNAKE_CASE = self.post_process_function(a , a , output.predictions)
SCREAMING_SNAKE_CASE = self.compute_metrics(a)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
SCREAMING_SNAKE_CASE = metrics.pop(a)
self.log(a)
else:
SCREAMING_SNAKE_CASE = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , a)
return metrics
def SCREAMING_SNAKE_CASE__ ( self , a , a , a=None , a = "test") -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.get_test_dataloader(a)
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE = self.compute_metrics
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE = eval_loop(
a , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a , )
finally:
SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE = self.post_process_function(a , a , output.predictions , 'predict')
SCREAMING_SNAKE_CASE = self.compute_metrics(a)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
SCREAMING_SNAKE_CASE = metrics.pop(a)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=a)
def SCREAMING_SNAKE_CASE__ ( self , a="./") -> List[Any]:
SCREAMING_SNAKE_CASE = self.eval_dataset
SCREAMING_SNAKE_CASE = self.get_eval_dataloader(a)
SCREAMING_SNAKE_CASE = next(iter(a))
# saving device - to make it consistent
SCREAMING_SNAKE_CASE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# convert to tuple
SCREAMING_SNAKE_CASE = tuple(v.to(a) for k, v in batch.items())
logger.info('Converting model to be onnx compatible')
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.model.to(a)
model.eval()
model.float()
SCREAMING_SNAKE_CASE = model.module if hasattr(a , 'module') else model
quant_trainer.configure_model(a , self.quant_trainer_args)
SCREAMING_SNAKE_CASE = os.path.join(a , 'model.onnx')
logger.info(f'''exporting model to {output_model_file}''')
SCREAMING_SNAKE_CASE = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
a , a , a , export_params=a , opset_version=13 , do_constant_folding=a , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=a , )
logger.info('onnx export finished')
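# Hedged usage sketch (illustrative only; the dataset/argument wiring is assumed,
# and `calibrate`/`save_onnx` depend on the local `quant_trainer` module):
#   trainer = QuestionAnsweringTrainer(model=model, args=training_args, eval_examples=eval_examples,
#                                      post_process_function=post_proc, quant_trainer_args=qt_args)
#   trainer.calibrate()            # forward passes that collect quantization ranges
#   metrics = trainer.evaluate()   # post-processes predictions before computing metrics
#   trainer.save_onnx("./out")     # exports the calibrated model with dynamic batch/seq axes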
| 327 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
A =logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 34 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
'''simple docstring'''
def __init__( self : Any , lowercase_ : List[Any] , lowercase_ : int=3 , lowercase_ : Dict=32 , lowercase_ : Optional[Any]=3 , lowercase_ : Tuple=10 , lowercase_ : Optional[Any]=[10, 20, 30, 40] , lowercase_ : List[str]=[1, 1, 2, 1] , lowercase_ : Optional[int]=True , lowercase_ : str=True , lowercase_ : Dict="relu" , lowercase_ : Optional[Any]=3 , lowercase_ : List[str]=None , ) -> int:
UpperCAmelCase : Dict = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : Any = image_size
UpperCAmelCase : Any = num_channels
UpperCAmelCase : List[str] = embeddings_size
UpperCAmelCase : str = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : int = use_labels
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Union[str, Any] = scope
UpperCAmelCase : Any = len(lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase_ ( self : int , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = TFResNetModel(config=lowercase_ )
UpperCAmelCase : int = model(lowercase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] ) -> List[Any]:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Union[str, Any] = TFResNetForImageClassification(lowercase_ )
UpperCAmelCase : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCAmelCase_ : Dict = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Optional[int] = False
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : Optional[int] = TFResNetModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
pass
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(lowercase_ )
UpperCAmelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
def check_hidden_states_output(lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ):
UpperCAmelCase : Union[str, Any] = model_class(lowercase_ )
UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase : List[Any] = layer_type
UpperCAmelCase : int = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : List[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : str = TFResNetModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCamelCase( ):
UpperCAmelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase : Any = self.default_image_processor
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=lowercase_ , return_tensors='tf' )
# forward pass
UpperCAmelCase : List[Any] = model(**lowercase_ )
# verify the logits
UpperCAmelCase : Optional[Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCAmelCase : int = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowercase_ , atol=1E-4 ) )
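# Hedged standalone sketch of the integration flow above; the checkpoint name is an
# assumption (the first entry of TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, e.g. "microsoft/resnet-50"):
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=prepare_img(), return_tensors="tf")
#   logits = model(**inputs).logits  # shape (1, 1000)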
| 151 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11))
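# Hedged check: the eleven truncatable primes are known to sum to 748317 (Project Euler 37).
# assert solution() == 748317  # left commented out; the search takes a little while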
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
| 352 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler( SchedulerMixin , ConfigMixin ):
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , trained_betas: Optional[Union[np.ndarray, List[float]]] = None ):
        '''simple docstring'''
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , return_dict: bool = True , ):
        '''simple docstring'''
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self , sample: torch.FloatTensor , *args , **kwargs ):
        '''simple docstring'''
        return sample
    def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ):
        '''simple docstring'''
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
    def __len__( self ):
        '''simple docstring'''
        return self.config.num_train_timesteps
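# Hedged numerical aside: the four-term branch in `step` applies the classic
# fourth-order Adams-Bashforth weights (55, -59, 37, -9) / 24, which sum to 1.
_ab4_weights = [55 / 24, -59 / 24, 37 / 24, -9 / 24]
assert abs(sum(_ab4_weights) - 1.0) < 1e-12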
| 201 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
args = parser.parse_args()
model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
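# Hedged example invocation (the script filename and paths are hypothetical):
#   python convert_xglm_checkpoint.py /path/to/model.pt ./xglm-converted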
| 292 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open(*args , **kwargs ):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Dict = len(snake_case )
self.assertGreater(snake_case , 0 )
self.assertEqual(
snake_case , [
{
"score": ANY(snake_case ),
"label": ANY(snake_case ),
"box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
}
for i in range(snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
UpperCAmelCase : Tuple = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
UpperCAmelCase : Union[str, Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = 0.2
UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : str = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : List[str] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
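# Hedged minimal usage sketch mirroring the tests above (model resolution is left
# to the hub default for this pipeline task):
#   detector = pipeline("zero-shot-object-detection")
#   preds = detector("http://images.cocodataset.org/val2017/000000039769.jpg",
#                    candidate_labels=["cat", "remote", "couch"], threshold=0.2, top_k=3)
#   each prediction is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}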
| 311 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return f'gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy'
def __lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
def __lowercase ( self , lowerCamelCase__=0 , lowerCamelCase__=(4, 4, 64, 64) , lowerCamelCase__=False ):
"""simple docstring"""
__UpperCamelCase : str =jnp.bfloat16 if fpaa else jnp.float32
__UpperCamelCase : Optional[Any] =jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__ , lowerCamelCase__ ) ) , dtype=lowerCamelCase__ )
return image
def __lowercase ( self , lowerCamelCase__=False , lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
"""simple docstring"""
__UpperCamelCase : List[Any] =jnp.bfloat16 if fpaa else jnp.float32
__UpperCamelCase : Optional[int] ='bf16' if fpaa else None
__UpperCamelCase , __UpperCamelCase : Any =FlaxUNet2DConditionModel.from_pretrained(
lowerCamelCase__ , subfolder='unet' , dtype=lowerCamelCase__ , revision=lowerCamelCase__ )
return model, params
def __lowercase ( self , lowerCamelCase__=0 , lowerCamelCase__=(4, 77, 768) , lowerCamelCase__=False ):
"""simple docstring"""
__UpperCamelCase : str =jnp.bfloat16 if fpaa else jnp.float32
__UpperCamelCase : Optional[int] =jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__ , lowerCamelCase__ ) ) , dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Dict =self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowerCamelCase__ )
__UpperCamelCase : Dict =self.get_latents(lowerCamelCase__ , fpaa=lowerCamelCase__ )
__UpperCamelCase : Optional[int] =self.get_encoder_hidden_states(lowerCamelCase__ , fpaa=lowerCamelCase__ )
__UpperCamelCase : List[str] =model.apply(
{'params': params} , lowerCamelCase__ , jnp.array(lowerCamelCase__ , dtype=jnp.int32 ) , encoder_hidden_states=lowerCamelCase__ , ).sample
assert sample.shape == latents.shape
__UpperCamelCase : List[str] =jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
__UpperCamelCase : int =jnp.array(lowerCamelCase__ , dtype=jnp.float32 )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Dict =self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =self.get_latents(lowerCamelCase__ , shape=(4, 4, 96, 96) , fpaa=lowerCamelCase__ )
__UpperCamelCase : int =self.get_encoder_hidden_states(lowerCamelCase__ , shape=(4, 77, 1024) , fpaa=lowerCamelCase__ )
__UpperCamelCase : str =model.apply(
{'params': params} , lowerCamelCase__ , jnp.array(lowerCamelCase__ , dtype=jnp.int32 ) , encoder_hidden_states=lowerCamelCase__ , ).sample
assert sample.shape == latents.shape
__UpperCamelCase : int =jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
__UpperCamelCase : Optional[Any] =jnp.array(lowerCamelCase__ , dtype=jnp.float32 )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-2 )
| 245 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def A ( a_ ) -> int:
__UpperCamelCase : List[Any] =botoa.client('iam' )
__UpperCamelCase : List[str] ={
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=a_ ,AssumeRolePolicyDocument=json.dumps(a_ ,indent=2 ) )
__UpperCamelCase : List[str] ={
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=a_ ,PolicyName=F'{role_name}_policy_permission' ,PolicyDocument=json.dumps(a_ ,indent=2 ) ,)
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'role {role_name} already exists. Using existing one' )
def A ( a_ ) -> Optional[Any]:
__UpperCamelCase : List[Any] =botoa.client('iam' )
return iam_client.get_role(RoleName=a_ )["Role"]["Arn"]
def A ( ) -> Tuple:
__UpperCamelCase : Any =_ask_options(
'How do you want to authorize?' ,['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] ,a_ ,)
__UpperCamelCase : str =None
if credentials_configuration == 0:
__UpperCamelCase : str =_ask_field('Enter your AWS Profile name: [default] ' ,default='default' )
__UpperCamelCase : Optional[Any] =aws_profile
else:
print(
'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,'
'`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
__UpperCamelCase : int =_ask_field('AWS Access Key ID: ' )
__UpperCamelCase : Dict =aws_access_key_id
__UpperCamelCase : Any =_ask_field('AWS Secret Access Key: ' )
__UpperCamelCase : Optional[Any] =aws_secret_access_key
__UpperCamelCase : Tuple =_ask_field('Enter your AWS Region: [us-east-1]' ,default='us-east-1' )
__UpperCamelCase : List[str] =aws_region
__UpperCamelCase : Any =_ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' ,['Provide IAM Role name', 'Create new IAM role using credentials'] ,a_ ,)
if role_management == 0:
__UpperCamelCase : Optional[Any] =_ask_field('Enter your IAM role name: ' )
else:
__UpperCamelCase : Dict ='accelerate_sagemaker_execution_role'
print(F'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
_create_iam_role_for_sagemaker(a_ )
__UpperCamelCase : List[Any] =_ask_field(
'Do you want to use a custom Docker image? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : int =None
if is_custom_docker_image:
__UpperCamelCase : List[Any] =_ask_field('Enter your Docker image: ' ,lambda a_ : str(a_ ).lower() )
__UpperCamelCase : Union[str, Any] =_ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Optional[Any] =None
if is_sagemaker_inputs_enabled:
__UpperCamelCase : Optional[Any] =_ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' ,lambda a_ : str(a_ ).lower() ,)
__UpperCamelCase : str =_ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Dict =None
if is_sagemaker_metrics_enabled:
__UpperCamelCase : Optional[Any] =_ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' ,lambda a_ : str(a_ ).lower() ,)
__UpperCamelCase : int =_ask_options(
'What is the distributed mode?' ,['No distributed training', 'Data parallelism'] ,_convert_sagemaker_distributed_mode ,)
__UpperCamelCase : int ={}
__UpperCamelCase : str =_ask_field(
'Do you wish to optimize your script with torch dynamo?[yes/NO]:' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
if use_dynamo:
__UpperCamelCase : Dict ='dynamo_'
__UpperCamelCase : Optional[int] =_ask_options(
'Which dynamo backend would you like to use?' ,[x.lower() for x in DYNAMO_BACKENDS] ,_convert_dynamo_backend ,default=2 ,)
__UpperCamelCase : Tuple =_ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
if use_custom_options:
__UpperCamelCase : List[str] =_ask_options(
'Which mode do you want to use?' ,a_ ,lambda a_ : TORCH_DYNAMO_MODES[int(a_ )] ,default='default' ,)
__UpperCamelCase : Union[str, Any] =_ask_field(
'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Tuple =_ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Tuple ='Which EC2 instance type you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
__UpperCamelCase : int =_ask_options(
a_ ,a_ ,lambda a_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(a_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCamelCase : List[str] =_ask_field(a_ ,lambda a_ : str(a_ ).lower() ,default='ml.p3.2xlarge' )
__UpperCamelCase : Union[str, Any] =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCamelCase : List[str] =_ask_field(
'How many machines do you want to use? [1]: ' ,a_ ,default=1 ,)
__UpperCamelCase : Optional[Any] =_ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' ,['no', 'fp16', 'bf16', 'fp8'] ,_convert_mixed_precision ,)
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=a_ ,compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER ,distributed_type=a_ ,use_cpu=a_ ,dynamo_config=a_ ,eca_instance_type=a_ ,profile=a_ ,region=a_ ,iam_role_name=a_ ,mixed_precision=a_ ,num_machines=a_ ,sagemaker_inputs_file=a_ ,sagemaker_metrics_file=a_ ,)
| 245 | 1 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 18 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig( PretrainedConfig ):
    model_type = 'perceiver'
def __init__( self : List[Any],_A : Tuple=256,_A : str=1280,_A : List[Any]=768,_A : Union[str, Any]=1,_A : Union[str, Any]=26,_A : List[str]=8,_A : List[Any]=8,_A : List[Any]=None,_A : List[Any]=None,_A : Union[str, Any]="kv",_A : Any=1,_A : int=1,_A : Dict="gelu",_A : Any=0.1,_A : int=0.02,_A : int=1E-12,_A : Any=True,_A : Optional[Any]=262,_A : List[Any]=2048,_A : str=56,_A : Optional[int]=[368, 496],_A : Dict=16,_A : Tuple=1920,_A : List[Any]=16,_A : str=[1, 16, 224, 224],**_A : Optional[Any],):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Dict = num_latents
SCREAMING_SNAKE_CASE_ : List[Any] = d_latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = num_blocks
SCREAMING_SNAKE_CASE_ : List[Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE_ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = num_cross_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = qk_channels
SCREAMING_SNAKE_CASE_ : Any = v_channels
SCREAMING_SNAKE_CASE_ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE_ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE_ : Any = cross_attention_widening_factor
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE_ : Dict = image_size
# flow attributes
SCREAMING_SNAKE_CASE_ : List[Any] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE_ : str = num_frames
SCREAMING_SNAKE_CASE_ : Any = audio_samples_per_frame
SCREAMING_SNAKE_CASE_ : Tuple = samples_per_patch
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_shape
class a__ ( A__ ):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 1 |
"""simple docstring"""
import random
def _A ( _a : list , _a : Any ):
"""simple docstring"""
A , A , A = [], [], []
for element in data:
if element < pivot:
less.append(_a )
elif element > pivot:
greater.append(_a )
else:
equal.append(_a )
return less, equal, greater
def _A ( _a : list , _a : int ):
"""simple docstring"""
if index >= len(_a ) or index < 0:
return None
A = items[random.randint(0 , len(_a ) - 1 )]
A = 0
A , A , A = _partition(_a , _a )
A = len(_a )
A = len(_a )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_a , _a )
# must be in larger
else:
return quick_select(_a , index - (m + count) )
| 352 |
"""simple docstring"""
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> Any:
A = 0
A = 0
A = {}
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> List[str]:
if vertex not in self.adjacency:
A = {}
self.num_vertices += 1
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Optional[Any]:
self.add_vertex(lowerCamelCase_ )
self.add_vertex(lowerCamelCase_ )
if head == tail:
return
A = weight
A = weight
def UpperCamelCase__ ( self ) -> List[str]:
A = self.get_edges()
for edge in edges:
A , A , A = edge
edges.remove((tail, head, weight) )
for i in range(len(lowerCamelCase_ ) ):
A = list(edges[i] )
edges.sort(key=lambda lowerCamelCase_ : e[2] )
for i in range(len(lowerCamelCase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
A = edges[i][2] + 1
for edge in edges:
A , A , A = edge
A = weight
A = weight
def __str__( self ) -> Dict:
A = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
A = self.adjacency[head][tail]
string += f'{head} -> {tail} == {weight}\n'
return string.rstrip("""\n""" )
def UpperCamelCase__ ( self ) -> Optional[Any]:
A = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def UpperCamelCase__ ( self ) -> List[str]:
return self.adjacency.keys()
@staticmethod
def UpperCamelCase__ ( lowerCamelCase_=None ,lowerCamelCase_=None ) -> Optional[Any]:
A = Graph()
if vertices is None:
A = []
if edges is None:
A = []
for vertex in vertices:
g.add_vertex(lowerCamelCase_ )
for edge in edges:
g.add_edge(*lowerCamelCase_ )
return g
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> List[str]:
A = {}
A = {}
def __len__( self ) -> List[str]:
return len(self.parent )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> List[str]:
if item in self.parent:
return self.find(lowerCamelCase_ )
A = item
A = 0
return item
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Union[str, Any]:
if item not in self.parent:
return self.make_set(lowerCamelCase_ )
if item != self.parent[item]:
A = self.find(self.parent[item] )
return self.parent[item]
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> Any:
A = self.find(lowerCamelCase_ )
A = self.find(lowerCamelCase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
A = roota
return roota
if self.rank[roota] < self.rank[roota]:
A = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
A = roota
return roota
return None
@staticmethod
def UpperCamelCase__ ( lowerCamelCase_ ) -> List[str]:
A = graph.num_vertices
A = Graph.UnionFind()
A = []
while num_components > 1:
A = {}
for vertex in graph.get_vertices():
A = -1
A = graph.get_edges()
for edge in edges:
A , A , A = edge
edges.remove((tail, head, weight) )
for edge in edges:
A , A , A = edge
A = union_find.find(lowerCamelCase_ )
A = union_find.find(lowerCamelCase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
A , A , A = cheap_edge[vertex]
if union_find.find(lowerCamelCase_ ) != union_find.find(lowerCamelCase_ ):
union_find.union(lowerCamelCase_ ,lowerCamelCase_ )
mst_edges.append(cheap_edge[vertex] )
A = num_components - 1
A = Graph.build(edges=lowerCamelCase_ )
return mst
| 77 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_UpperCAmelCase : Any = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 174 |
'''simple docstring'''
from collections.abc import Generator
def __magic_name__( ):
__lowerCAmelCase , __lowerCAmelCase = 0, 1
while True:
__lowerCAmelCase , __lowerCAmelCase = b, a + b
yield b
def __magic_name__( lowerCamelCase = 1_0_0_0):
__lowerCAmelCase = 1
__lowerCAmelCase = fibonacci_generator()
while len(str(next(lowerCamelCase))) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 174 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def UpperCamelCase__ ( ):
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 220 |
'''simple docstring'''
def UpperCamelCase__ ( lowerCAmelCase = 4_00_00_00 ):
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase , _lowerCAmelCase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = b, a + b
return sum(lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 220 | 1 |
'''simple docstring'''
import operator
def __snake_case ( UpperCAmelCase_ : list , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : list | None = None ):
lowerCamelCase_ = operator.lt if reverse else operator.gt
lowerCamelCase_ = solution or []
if not arr:
return solution
lowerCamelCase_ = [arr.pop(0 )]
for i, item in enumerate(UpperCAmelCase_ ):
if _operator(UpperCAmelCase_ , sublist[-1] ):
sublist.append(UpperCAmelCase_ )
arr.pop(UpperCAmelCase_ )
# merging sublist into solution list
if not solution:
solution.extend(UpperCAmelCase_ )
else:
while sublist:
lowerCamelCase_ = sublist.pop(0 )
for i, xx in enumerate(UpperCAmelCase_ ):
if not _operator(UpperCAmelCase_ , UpperCAmelCase_ ):
solution.insert(UpperCAmelCase_ , UpperCAmelCase_ )
break
else:
solution.append(UpperCAmelCase_ )
strand_sort(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 55 |
'''simple docstring'''
from __future__ import annotations
import queue
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ) -> str:
_UpperCAmelCase : Optional[Any] = data
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Union[str, Any] = None
def snake_case_ ( )-> TreeNode:
'''simple docstring'''
print("""\n********Press N to stop entering at any point of time********\n""" )
_UpperCAmelCase : Any = input("""Enter the value of the root node: """ ).strip().lower()
_UpperCAmelCase : queue.Queue = queue.Queue()
_UpperCAmelCase : List[str] = TreeNode(int(lowerCAmelCase_ ) )
q.put(lowerCAmelCase_ )
while not q.empty():
_UpperCAmelCase : str = q.get()
_UpperCAmelCase : Any = F'''Enter the left node of {node_found.data}: '''
_UpperCAmelCase : Union[str, Any] = input(lowerCAmelCase_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_UpperCAmelCase : List[str] = TreeNode(int(lowerCAmelCase_ ) )
_UpperCAmelCase : Optional[int] = left_node
q.put(lowerCAmelCase_ )
_UpperCAmelCase : Dict = F'''Enter the right node of {node_found.data}: '''
_UpperCAmelCase : Tuple = input(lowerCAmelCase_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_UpperCAmelCase : Any = TreeNode(int(lowerCAmelCase_ ) )
_UpperCAmelCase : Optional[Any] = right_node
q.put(lowerCAmelCase_ )
raise
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : queue.Queue = queue.Queue()
q.put(lowerCAmelCase_ )
while not q.empty():
_UpperCAmelCase : Dict = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : queue.Queue = queue.Queue()
q.put(lowerCAmelCase_ )
while not q.empty():
_UpperCAmelCase : Optional[int] = []
while not q.empty():
_UpperCAmelCase : Optional[int] = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : list[TreeNode] = []
_UpperCAmelCase : Optional[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = n.left
# end of while means current node doesn't have left child
_UpperCAmelCase : int = stack.pop()
# start to traverse its right child
_UpperCAmelCase : Any = n.right
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : list[TreeNode] = []
_UpperCAmelCase : Optional[Any] = node
while n or stack:
while n:
stack.append(lowerCAmelCase_ )
_UpperCAmelCase : Tuple = n.left
_UpperCAmelCase : Union[str, Any] = stack.pop()
print(n.data , end=""",""" )
_UpperCAmelCase : Any = n.right
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase ,_UpperCAmelCase : str = [], []
_UpperCAmelCase : Dict = node
stacka.append(lowerCAmelCase_ )
while stacka: # to find the reversed order of post order, store it in stack2
_UpperCAmelCase : Optional[int] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(lowerCAmelCase_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def snake_case_ ( lowerCAmelCase_ = "" , lowerCAmelCase_=50 , lowerCAmelCase_="*" )-> str:
'''simple docstring'''
if not s:
return "\n" + width * char
_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = divmod(width - len(lowerCAmelCase_ ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
A_ : TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 215 | 0 |
from collections.abc import Sequence
def lowerCamelCase_ ( UpperCamelCase__ : Sequence[int] | None = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
UpperCamelCase__ = nums[0]
for i in range(1, len(UpperCamelCase__ ) ):
UpperCamelCase__ = nums[i]
UpperCamelCase__ = max(UpperCamelCase__, ans + num, UpperCamelCase__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input("""Enter number of elements : """).strip())
lowercase = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 35 | lowercase = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_58_18,
}
def lowerCamelCase_ ( UpperCamelCase__ : str, UpperCamelCase__ : str, UpperCamelCase__ : float ):
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
UpperCamelCase__ = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {", ".join(UpperCamelCase__ )}"""
)
raise ValueError(UpperCamelCase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = 3_8_4
if "tiny" in model_name:
UpperCAmelCase__ = [3, 3, 9, 3]
UpperCAmelCase__ = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
UpperCAmelCase__ = [3, 3, 2_7, 3]
UpperCAmelCase__ = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
UpperCAmelCase__ = [3, 3, 2_7, 3]
UpperCAmelCase__ = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
UpperCAmelCase__ = 5_1_2
if "large" in model_name:
UpperCAmelCase__ = [3, 3, 2_7, 3]
UpperCAmelCase__ = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
UpperCAmelCase__ = 7_6_8
if "xlarge" in model_name:
UpperCAmelCase__ = [3, 3, 2_7, 3]
UpperCAmelCase__ = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
UpperCAmelCase__ = 1_0_2_4
# set label information
UpperCAmelCase__ = 1_5_0
UpperCAmelCase__ = 'huggingface/label-files'
UpperCAmelCase__ = 'ade20k-id2label.json'
UpperCAmelCase__ = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type='dataset' ) , 'r' ) )
UpperCAmelCase__ = {int(lowerCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase__ = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ = ConvNextConfig(
depths=lowerCamelCase , hidden_sizes=lowerCamelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
UpperCAmelCase__ = UperNetConfig(
backbone_config=lowerCamelCase , auxiliary_in_channels=lowerCamelCase , num_labels=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , )
return config
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = dct.pop(lowerCamelCase )
UpperCAmelCase__ = val
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
UpperCAmelCase__ = model_name_to_url[model_name]
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location='cpu' )['state_dict']
UpperCAmelCase__ = get_upernet_config(lowerCamelCase )
UpperCAmelCase__ = UperNetForSemanticSegmentation(lowerCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase__ = state_dict.pop(lowerCamelCase )
if "bn" in key:
UpperCAmelCase__ = key.replace('bn' , 'batch_norm' )
UpperCAmelCase__ = val
# rename keys
UpperCAmelCase__ = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
model.load_state_dict(lowerCamelCase )
# verify on image
UpperCAmelCase__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
UpperCAmelCase__ = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ).convert('RGB' )
UpperCAmelCase__ = SegformerImageProcessor()
UpperCAmelCase__ = processor(lowerCamelCase , return_tensors='pt' ).pixel_values
with torch.no_grad():
UpperCAmelCase__ = model(lowerCamelCase )
if model_name == "upernet-convnext-tiny":
UpperCAmelCase__ = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
UpperCAmelCase__ = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
UpperCAmelCase__ = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
UpperCAmelCase__ = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
UpperCAmelCase__ = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase__ : Any = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 98 | """simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = None
snake_case__ = None
lowerCAmelCase__ : Union[str, Any] = namedtuple('CoinsDistribResult', 'moves excess')
def a_ ( lowerCamelCase ):
if root is None:
return 0
# Validation
def count_nodes(lowerCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowerCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowerCamelCase ) != count_coins(lowerCamelCase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(lowerCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.left )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.right )
UpperCAmelCase__ = 1 - left_distrib_excess
UpperCAmelCase__ = 1 - right_distrib_excess
UpperCAmelCase__ = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowerCamelCase )
+ abs(lowerCamelCase )
)
UpperCAmelCase__ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowerCamelCase , lowerCamelCase )
return get_distrib(lowerCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=lowerCAmelCase , )
assert hasattr(self , 'env' )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = {
'enabled': True,
'processes_per_host': 8,
}
snake_case = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
snake_case = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
snake_case = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase , hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase , py_version='py36' , )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
TrainingJobAnalytics(lowerCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.create_estimator(lowerCAmelCase )
# run training
estimator.fit()
# result dataframe
snake_case = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
snake_case = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
snake_case = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , lowerCAmelCase )
| 149 | """simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE__ = os.path.join(git_repo_path, "src", "transformers")
SCREAMING_SNAKE_CASE__ = "\n{0} = None\n"
SCREAMING_SNAKE_CASE__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
SCREAMING_SNAKE_CASE__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
snake_case = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(lowerCAmelCase )
snake_case = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(lowerCAmelCase , 'tokenizers' )
snake_case = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(lowerCAmelCase , 'tensorflow_text' )
snake_case = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(lowerCAmelCase , 'sentencepiece_and_tokenizers' )
snake_case = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(lowerCAmelCase , 'sentencepiece_and_tensorflow_text' )
snake_case = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(lowerCAmelCase , 'sentencepiece_and_tokenizers_and_vision' )
def snake_case ( self ):
"""simple docstring"""
snake_case = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , lowerCAmelCase )
self.assertIn('tensorflow_text' , lowerCAmelCase )
self.assertIn('sentencepiece_and_tokenizers' , lowerCAmelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def snake_case ( self ):
"""simple docstring"""
snake_case = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(lowerCAmelCase , '\nCONSTANT = None\n' )
snake_case = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
lowerCAmelCase , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
snake_case = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
snake_case = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
snake_case = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , lowerCAmelCase )
| 149 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class __snake_case :
def __init__( self : Optional[Any] , _lowercase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = value
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
class __snake_case :
def __init__( self : List[str] , _lowercase : Node ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = tree
def __a ( self : int , _lowercase : Node | None ):
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Optional[int] ):
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219 | import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__lowerCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__lowerCamelCase : int = [0, 25, 50]
__lowerCamelCase : Tuple = [25, 50, 75]
__lowerCamelCase : List[str] = fuzz.membership.trimf(X, abca)
__lowerCamelCase : Tuple = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__lowerCamelCase : List[str] = np.ones(75)
__lowerCamelCase : Tuple = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__lowerCamelCase : str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__lowerCamelCase : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
__lowerCamelCase : Union[str, Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__lowerCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__lowerCamelCase : List[Any] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__lowerCamelCase : int = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
__lowerCamelCase : int = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
__lowerCamelCase : str = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 219 | 1 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def A ( a_ ,a_ ,a_ ) -> List[Any]:
__UpperCamelCase : Optional[int] =AutoConfig.from_pretrained(a_ )
__UpperCamelCase : str =FlaxAutoModelForSeqaSeqLM.from_config(config=a_ )
__UpperCamelCase : Optional[int] =checkpoints.load_tax_checkpoint(a_ )
__UpperCamelCase : Optional[int] ='wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
__UpperCamelCase : Optional[Any] ='SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__UpperCamelCase : Dict ='LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__UpperCamelCase : Union[str, Any] ='TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global].' )
# Encoder
for layer_index in range(config.num_layers ):
__UpperCamelCase : str =F'layers_{str(a_ )}'
# Self-Attention
__UpperCamelCase : Tuple =tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
__UpperCamelCase : str =tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
__UpperCamelCase : Tuple =tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
__UpperCamelCase : Any =tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__UpperCamelCase : Union[str, Any] =tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
__UpperCamelCase : Union[str, Any] =tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
__UpperCamelCase : Dict =tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
__UpperCamelCase : Optional[int] =tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
__UpperCamelCase : Optional[Any] =tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
__UpperCamelCase : Any =tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
__UpperCamelCase : str =tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
__UpperCamelCase : Any =flax_model.params['encoder']['block'][str(a_ )]['layer']
__UpperCamelCase : Optional[Any] =tax_attention_key
__UpperCamelCase : List[Any] =tax_attention_out
__UpperCamelCase : Tuple =tax_attention_query
__UpperCamelCase : Any =tax_attention_value
__UpperCamelCase : Any =tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__UpperCamelCase : Optional[Any] =tax_global_layer_norm
if split_mlp_wi:
__UpperCamelCase : Tuple =tax_mlp_wi_a
__UpperCamelCase : Dict =tax_mlp_wi_a
else:
__UpperCamelCase : Optional[Any] =tax_mlp_wi
__UpperCamelCase : Dict =tax_mlp_wo
__UpperCamelCase : Optional[Any] =tax_mlp_layer_norm
__UpperCamelCase : Dict =flax_model_encoder_layer_block
# Only for layer 0:
__UpperCamelCase : List[str] =tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
__UpperCamelCase : List[Any] =tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__UpperCamelCase : List[Any] =tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
__UpperCamelCase : List[Any] =tax_encoder_global_rel_embedding
# Assigning
__UpperCamelCase : int =tax_model['target']['encoder']['encoder_norm']['scale']
__UpperCamelCase : int =tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__UpperCamelCase : Optional[Any] =F'layers_{str(a_ )}'
# Self-Attention
__UpperCamelCase : List[Any] =tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
__UpperCamelCase : List[str] =tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
__UpperCamelCase : Optional[int] =tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
__UpperCamelCase : int =tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
__UpperCamelCase : int =tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
__UpperCamelCase : str =tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
__UpperCamelCase : List[Any] =tax_enc_dec_attention_module['key']['kernel']
__UpperCamelCase : Tuple =tax_enc_dec_attention_module['out']['kernel']
__UpperCamelCase : Optional[Any] =tax_enc_dec_attention_module['query']['kernel']
__UpperCamelCase : List[str] =tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
__UpperCamelCase : Union[str, Any] =tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
__UpperCamelCase : str =tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
__UpperCamelCase : Optional[int] =tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
__UpperCamelCase : List[str] =tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
__UpperCamelCase : Dict =tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
__UpperCamelCase : Optional[int] =tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
__UpperCamelCase : Any =flax_model.params['decoder']['block'][str(a_ )]['layer']
__UpperCamelCase : List[str] =tax_attention_key
__UpperCamelCase : int =tax_attention_out
__UpperCamelCase : Tuple =tax_attention_query
__UpperCamelCase : str =tax_attention_value
__UpperCamelCase : str =tax_pre_attention_layer_norm
__UpperCamelCase : str =tax_enc_dec_attention_key
__UpperCamelCase : Optional[int] =tax_enc_dec_attention_out
__UpperCamelCase : List[Any] =tax_enc_dec_attention_query
__UpperCamelCase : List[Any] =tax_enc_dec_attention_value
__UpperCamelCase : Optional[int] =tax_cross_layer_norm
if split_mlp_wi:
__UpperCamelCase : Dict =tax_mlp_wi_a
__UpperCamelCase : int =tax_mlp_wi_a
else:
__UpperCamelCase : List[Any] =tax_mlp_wi
__UpperCamelCase : Dict =tax_mlp_wo
__UpperCamelCase : Tuple =txa_mlp_layer_norm
__UpperCamelCase : Optional[Any] =flax_model_decoder_layer_block
# Decoder Normalization
__UpperCamelCase : List[Any] =tax_model['target']['decoder']['decoder_norm']['scale']
__UpperCamelCase : Tuple =txa_decoder_norm
# Only for layer 0:
__UpperCamelCase : int =tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
__UpperCamelCase : Any =tax_decoder_rel_embedding
# Token Embeddings
__UpperCamelCase : int =tax_model['target']['token_embedder']['embedding']
__UpperCamelCase : Optional[Any] =txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__UpperCamelCase : Dict =tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(a_ )
print('T5X Model was sucessfully converted!' )
if __name__ == "__main__":
A_ :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
A_ :List[Any] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 245 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ :Union[str, Any] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :Optional[Any] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :Optional[int] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
A_ :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 245 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase__ :List[str] = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Dict = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[Any] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
lowercase__ :List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101 |
import random
from .binary_exp_mod import bin_exp_mod
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=1000 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowercase = n - 1
lowercase = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
lowercase = 0
while count < prec:
lowercase = random.randint(2 , n - 1 )
lowercase = bin_exp_mod(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if b != 1:
lowercase = True
for _ in range(lowerCAmelCase__ ):
if b == n - 1:
lowercase = False
break
lowercase = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
lowercase__ :Tuple = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 101 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowercase__ =logging.get_logger(__name__)
lowercase__ ={
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
lowercase__ =[
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def __UpperCamelCase ( lowerCAmelCase__ : Tuple ):
__a : str = {}
with open(lowerCAmelCase__ , '''r''' ) as file:
for line_number, line in enumerate(lowerCAmelCase__ ):
__a : Dict = line.strip()
if line:
__a : List[str] = line.split()
__a : Optional[int] = line_number
__a : Optional[int] = words[0]
__a : List[Any] = value
return result
def __UpperCamelCase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] ):
for attribute in key.split('''.''' ):
__a : Union[str, Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
__a : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase__ ):
__a : int = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__a : Dict = '''param'''
if weight_type is not None and weight_type != "param":
__a : Optional[Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
elif weight_type is not None and weight_type == "param":
__a : List[str] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
__a : List[Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
__a : int = shape_pointer.shape
# let's reduce dimension
__a : Union[str, Any] = value[0]
else:
__a : str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__a : Union[str, Any] = value
elif weight_type == "weight_g":
__a : Tuple = value
elif weight_type == "weight_v":
__a : str = value
elif weight_type == "bias":
__a : int = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
__a : int = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
__a : Tuple = value
else:
__a : List[str] = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __UpperCamelCase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] ):
__a : List[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase__ ):
__a : str = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__a : Optional[Any] = '''param'''
if weight_type is not None and weight_type != "param":
__a : Optional[int] = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__a : int = '''.'''.join([key, hf_param_name] )
else:
__a : List[str] = key
__a : List[str] = value if '''lm_head''' in full_key else value[0]
lowercase__ ={
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def __UpperCamelCase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : str=None ):
__a : int = False
for key, mapped_key in MAPPING.items():
__a : Union[str, Any] = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__a : Optional[Any] = True
if "*" in mapped_key:
__a : Tuple = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
__a : Dict = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
__a : Optional[int] = '''weight_g'''
elif "weight_v" in name:
__a : Optional[int] = '''weight_v'''
elif "bias" in name:
__a : Dict = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : List[str] = '''weight'''
else:
__a : Union[str, Any] = None
if hf_dict is not None:
rename_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return is_used
return is_used
def __UpperCamelCase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] ):
__a : List[str] = []
__a : Any = fairseq_model.state_dict()
__a : int = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__a : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
__a : Optional[Any] = True
else:
__a : Optional[int] = load_wavaveca_layer(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(f"Unused weights: {unused_weights}" )
def __UpperCamelCase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str ):
__a : Tuple = full_name.split('''conv_layers.''' )[-1]
__a : Optional[int] = name.split('''.''' )
__a : int = int(items[0] )
__a : Dict = int(items[1] )
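    # type_id 0 -> the conv weight/bias itself; type_id 2 -> the norm parameters,
    # which exist for every layer with layer norm but only for layer 0 with group norm.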
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__a : Union[str, Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__a : Optional[int] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__a : int = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__a : Optional[int] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Tuple=False ):
if config_path is not None:
__a : Dict = WavaVecaConfig.from_pretrained(lowerCAmelCase__ )
else:
__a : Dict = WavaVecaConfig()
if is_seq_class:
__a : str = read_txt_into_dict(lowerCAmelCase__ )
__a : str = idalabel
__a : str = WavaVecaForSequenceClassification(lowerCAmelCase__ )
__a : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
feature_extractor.save_pretrained(lowerCAmelCase__ )
elif is_finetuned:
if dict_path:
__a : Tuple = Dictionary.load(lowerCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a : Optional[Any] = target_dict.pad_index
__a : Tuple = target_dict.bos_index
__a : int = target_dict.eos_index
__a : Any = len(target_dict.symbols )
__a : Any = os.path.join(lowerCAmelCase__ , '''vocab.json''' )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowerCAmelCase__ ) )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
__a : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : str = 0
__a : Any = 1
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
__a : Tuple = WavaVecaCTCTokenizer(
lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowerCAmelCase__ , )
__a : Any = True if config.feat_extract_norm == '''layer''' else False
__a : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
__a : Dict = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
__a : Tuple = WavaVecaForCTC(lowerCAmelCase__ )
else:
__a : List[str] = WavaVecaForPreTraining(lowerCAmelCase__ )
if is_finetuned or is_seq_class:
__a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__a : List[str] = argparse.Namespace(task='''audio_pretraining''' )
__a : int = fairseq.tasks.setup_task(lowerCAmelCase__ )
__a , __a , __a : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase__ )
__a : int = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
lowercase__ =parser.parse_args()
lowercase__ =not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 90 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowercase__ =50000
lowercase__ =5000
lowercase__ , lowercase__ =os.path.split(__file__)
lowercase__ =os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def __UpperCamelCase ( lowerCAmelCase__ : datasets.Dataset , lowerCAmelCase__ : List[str] ):
for i in range(lowerCAmelCase__ ):
__a : str = dataset[i]
@get_duration
def __UpperCamelCase ( lowerCAmelCase__ : datasets.Dataset , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple ):
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
__a : Optional[int] = dataset[i : i + batch_size]
@get_duration
def __UpperCamelCase ( lowerCAmelCase__ : datasets.Dataset , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
__a : Dict = dataset[i]
@get_duration
def __UpperCamelCase ( lowerCAmelCase__ : datasets.Dataset , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
__a : int = dataset[i : i + batch_size]
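# The four readers above span the benchmark matrix: sequential single-row access,
# batched slicing, and the same two patterns again under a non-arrow output format
# (numpy/pandas/torch/tensorflow) selected with `dataset.formatted_as`.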
def __UpperCamelCase ( ):
__a : Any = {'''num examples''': SPEED_TEST_N_EXAMPLES}
__a : List[Any] = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0_0}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0_0_0}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_0}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_0_0_0}),
]
__a : Union[str, Any] = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0_0}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0_0_0}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_0}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_0_0_0}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
__a : Optional[Any] = datasets.Features(
{'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
__a : Optional[int] = generate_example_dataset(
os.path.join(lowerCAmelCase__ , '''dataset.arrow''' ) , lowerCAmelCase__ , num_examples=lowerCAmelCase__ , seq_shapes={'''list''': (1_0_0,)} , )
print('''first set of iterations''' )
for func, kwargs in functions:
print(func.__name__ , str(lowerCAmelCase__ ) )
__a : str = func(lowerCAmelCase__ , **lowerCAmelCase__ )
print('''shuffling dataset''' )
__a : int = dataset.shuffle()
        print('''Second set of iterations (after shuffling)''' )
for func, kwargs in functions_shuffled:
print('''shuffled ''' , func.__name__ , str(lowerCAmelCase__ ) )
__a : List[Any] = func(
lowerCAmelCase__ , **lowerCAmelCase__ )
with open(lowerCAmelCase__ , '''wb''' ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 90 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__lowerCAmelCase : List[Any] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
__lowerCAmelCase : List[Any] = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
__lowerCAmelCase : Optional[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : int=None , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=False ) -> List[str]:
if rouge_types is None:
a = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
a = rouge_scorer.RougeScorer(rouge_types=__lowerCamelCase , use_stemmer=__lowerCamelCase )
if use_aggregator:
a = scoring.BootstrapAggregator()
else:
a = []
for ref, pred in zip(__lowerCamelCase , __lowerCamelCase ):
a = scorer.score(__lowerCamelCase , __lowerCamelCase )
if use_aggregator:
aggregator.add_scores(__lowerCamelCase )
else:
scores.append(__lowerCamelCase )
if use_aggregator:
a = aggregator.aggregate()
else:
a = {}
for key in scores[0]:
a = [score[key] for score in scores]
return result
| 107 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowercase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase=16 , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=14 , lowerCAmelCase=10 , lowerCAmelCase=19 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=True , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=[1, 2, 3, 4, 5] , lowerCAmelCase=25 , lowerCAmelCase=5 , ) -> Optional[Any]:
'''simple docstring'''
_lowercase =d_model
_lowercase =parent
_lowercase =batch_size
_lowercase =prediction_length
_lowercase =context_length
_lowercase =cardinality
_lowercase =num_time_features
_lowercase =lags_sequence
_lowercase =embedding_dimension
_lowercase =is_training
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =context_length
_lowercase =prediction_length + label_length
_lowercase =label_length
_lowercase =moving_average
_lowercase =autocorrelation_factor
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A__ ( self , lowerCAmelCase ) -> Dict:
'''simple docstring'''
_lowercase =config.context_length + max(config.lags_sequence )
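        # The encoder needs `max(lags_sequence)` extra steps of history beyond the
        # context window so every lagged feature can be computed at each position.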
_lowercase =ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_lowercase =floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_lowercase =floats_tensor([self.batch_size, _past_length] )
_lowercase =floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_lowercase =floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_lowercase =floats_tensor([self.batch_size, config.prediction_length] )
_lowercase ={
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase =self.get_config()
_lowercase =self.prepare_autoformer_inputs_dict(lowerCAmelCase )
return config, inputs_dict
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase , _lowercase =self.prepare_config_and_inputs()
return config, inputs_dict
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
'''simple docstring'''
_lowercase =AutoformerModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval()
_lowercase =model(**lowerCAmelCase )
_lowercase =outputs.encoder_last_hidden_state
_lowercase =outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =model.get_encoder()
encoder.save_pretrained(lowerCAmelCase )
_lowercase =AutoformerEncoder.from_pretrained(lowerCAmelCase ).to(lowerCAmelCase )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =model.create_network_inputs(**lowerCAmelCase )
_lowercase , _lowercase =model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_lowercase =torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_lowercase =encoder(inputs_embeds=lowerCAmelCase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_lowercase =(
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_lowercase =torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_lowercase =torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_lowercase =torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =model.get_decoder()
decoder.save_pretrained(lowerCAmelCase )
_lowercase =AutoformerDecoder.from_pretrained(lowerCAmelCase ).to(lowerCAmelCase )
_lowercase =decoder(
trend=lowerCAmelCase , inputs_embeds=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
_a = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_a = (AutoformerForPrediction,) if is_torch_available() else ()
_a = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
_a = False
_a = False
_a = False
_a = False
_a = False
_a = False
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =AutoformerModelTester(self )
_lowercase =ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowercase =model_class(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase )
_lowercase , _lowercase =model_class.from_pretrained(lowerCAmelCase , output_loading_info=lowerCAmelCase )
self.assertEqual(info['missing_keys'] , [] )
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase )
@unittest.skip(reason='Model has no tokens embeddings' )
def A__ ( self ) -> int:
'''simple docstring'''
pass
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase =inspect.signature(getattr(lowerCAmelCase , 'forward' ) )
# The main input is the name of the argument after `self`
_lowercase =list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(lowerCAmelCase )
_lowercase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase =[*signature.parameters.keys()]
_lowercase =[
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(lowerCAmelCase )] , lowerCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =True
_lowercase =getattr(self.model_tester , 'seq_length' , lowerCAmelCase )
_lowercase =getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase )
_lowercase =getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase )
_lowercase =getattr(self.model_tester , 'd_model' , lowerCAmelCase )
_lowercase =getattr(self.model_tester , 'num_attention_heads' , lowerCAmelCase )
_lowercase =d_model // num_attention_heads
for model_class in self.all_model_classes:
_lowercase =True
_lowercase =False
_lowercase =True
_lowercase =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
_lowercase =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowercase =True
_lowercase =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
_lowercase =outputs.encoder_attentions
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_lowercase =len(lowerCAmelCase )
_lowercase =7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
# decoder attentions
_lowercase =outputs.decoder_attentions
self.assertIsInstance(lowerCAmelCase , (list, tuple) )
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_lowercase =outputs.cross_attentions
self.assertIsInstance(lowerCAmelCase , (list, tuple) )
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_lowercase =True
_lowercase =True
_lowercase =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(out_len + 2 , len(lowerCAmelCase ) )
_lowercase =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def A__ ( self ) -> Dict:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def a ( A__ : List[str]="train-batch.pt" ) -> str:
"""simple docstring"""
_lowercase =hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=A__ , repo_type='dataset' )
_lowercase =torch.load(A__ , map_location=A__ )
return batch
@require_torch
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase =AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(lowerCAmelCase )
_lowercase =prepare_batch()
with torch.no_grad():
_lowercase =model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
_lowercase =torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , lowerCAmelCase )
_lowercase =torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=lowerCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(lowerCAmelCase )
_lowercase =prepare_batch('val-batch.pt' )
with torch.no_grad():
_lowercase =model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
_lowercase =torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , lowerCAmelCase )
_lowercase =torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=lowerCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
_lowercase =AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(lowerCAmelCase )
_lowercase =prepare_batch('val-batch.pt' )
with torch.no_grad():
_lowercase =model.generate(
static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
_lowercase =torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , lowerCAmelCase )
_lowercase =torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=lowerCAmelCase )
_lowercase =outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase , rtol=1e-1 ) )
| 205 | 0 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : int = 18 , lowerCAmelCase__ : int = 10 ) -> float:
__a = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__a = radians(lowerCAmelCase__ )
__a = angle_in_radians
__a = 3
__a = -1
for _ in range(lowerCAmelCase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase__ )
__a = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase__ , lowerCAmelCase__ )
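# Illustrative check (not part of the original file): sin(30 deg) = 0.5 and
# sin(90 deg) = 1.0, so with the default 18 series terms the function returns
# 0.5 for an input of 30 and 1.0 for an input of 90 after rounding to 10 places.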
if __name__ == "__main__":
__import__("doctest").testmod()
| 365 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=0.9_99 , lowerCAmelCase__ : List[str]="cosine" , ) -> Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase__ : int ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase__ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__a = []
for i in range(lowerCAmelCase__ ):
__a = i / num_diffusion_timesteps
__a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) , lowerCAmelCase__ ) )
return torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
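# Each beta_i above is 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at the second
# argument (0.999 by default): the discrete noise schedule implied by the chosen
# cosine (or exponential) alpha_bar curve.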
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : str = 2
@register_to_config
def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ):
if trained_betas is not None:
__a = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
__a = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a = betas_for_alpha_bar(_a )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__a = 1.0 - self.betas
__a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
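        # Scale the input by 1 / sqrt(sigma^2 + 1) (the EDM-style `c_in` factor) so the
        # model receives roughly unit-variance inputs at every noise level.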
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" correspond to the annotations in Table 2 of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
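        # Right-pad sigma with singleton dims so it broadcasts over the sample shape.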
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 11 | 0 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
| 16 |
'''simple docstring'''
from math import factorial, radians
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 18 , _SCREAMING_SNAKE_CASE = 10 ):
_snake_case = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
_snake_case = radians(_SCREAMING_SNAKE_CASE )
_snake_case = angle_in_radians
_snake_case = 3
_snake_case = -1
for _ in range(_SCREAMING_SNAKE_CASE ):
result += (b * (angle_in_radians**a)) / factorial(_SCREAMING_SNAKE_CASE )
_snake_case = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__import__('doctest').testmod() | 341 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase: Union[str, Any] = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Any = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
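    # `_LazyModule` defers the heavy imports declared above until an attribute of the
    # package is actually accessed, keeping the initial import cheap.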
_lowercase: Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase: Dict = logging.get_logger()
@dataclass
class _lowercase :
"""simple docstring"""
__A = 42
__A = field(default_factory=lowerCAmelCase )
__A = field(default_factory=lowerCAmelCase )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = len(list(m.modules() ) ) == 1 or isinstance(lowerCamelCase_ , nn.Convad ) or isinstance(lowerCamelCase_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCamelCase_ )
def __call__(self , lowerCamelCase_ ):
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCamelCase_ )
[x.remove() for x in self.handles]
return self
@property
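    # `parametrized` keeps only the traced leaf modules that actually own
    # parameters/buffers, so the transfer pairs up real weight-bearing layers.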
def UpperCamelCase_ (self ):
"""simple docstring"""
return list(filter(lambda lowerCamelCase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _lowercase :
"""simple docstring"""
__A = 42
__A = 42
__A = 1
__A = field(default_factory=lowerCAmelCase )
__A = field(default_factory=lowerCAmelCase )
__A = True
def __call__(self , lowerCamelCase_ ):
"""simple docstring"""
a = Tracker(self.dest )(lowerCamelCase_ ).parametrized
a = Tracker(self.src )(lowerCamelCase_ ).parametrized
a = list(filter(lambda lowerCamelCase_ : type(lowerCamelCase_ ) not in self.src_skip , lowerCamelCase_ ) )
a = list(filter(lambda lowerCamelCase_ : type(lowerCamelCase_ ) not in self.dest_skip , lowerCamelCase_ ) )
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ) and self.raise_if_mismatch:
raise Exception(
F'''Numbers of operations are different. Source module has {len(lowerCamelCase_ )} operations while'''
F''' destination module has {len(lowerCamelCase_ )}.''' )
for dest_m, src_m in zip(lowerCamelCase_ , lowerCamelCase_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
super().__init__()
a = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), F'''Unexpected layer name {k}'''
a = len(lowerCamelCase_ ) + 1
feature_blocks.append((F'''res{block_index}''', v) )
a = nn.ModuleDict(lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
return get_trunk_forward_outputs(
lowerCamelCase_ , out_feat_keys=lowerCamelCase_ , feature_blocks=self._feature_blocks , )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__(self , lowerCamelCase_ ):
"""simple docstring"""
if x not in self:
a = self.convert_name_to_timm(lowerCamelCase_ )
a = partial(lambda: (timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_ ).eval(), None) )
else:
a = super().__getitem__(lowerCamelCase_ )
return val
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def __getitem__(self , lowerCamelCase_ ):
"""simple docstring"""
if "seer" in x and "in1k" not in x:
a = RegNetModel
else:
a = RegNetForImageClassification
return val
def a( A : Dict , A : List[Any] , A : List[Tuple[str, str]] ) -> Union[str, Any]:
"""simple docstring"""
for from_key, to_key in keys:
a = from_state_dict[from_key].clone()
print(f'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def a( A : str , A : Callable[[], nn.Module] , A : Callable[[], nn.Module] , A : RegNetConfig , A : Path , A : bool = True , ) -> List[str]:
"""simple docstring"""
print(f'''Converting {name}...''' )
with torch.no_grad():
a , a = from_model_func()
a = our_model_func(A ).eval()
a = ModuleTransfer(src=A , dest=A , raise_if_mismatch=A )
a = torch.randn((1, 3, 224, 224) )
module_transfer(A )
if from_state_dict is not None:
a = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
a = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
a = manually_copy_vissl_head(A , our_model.state_dict() , A )
our_model.load_state_dict(A )
a = our_model(A , output_hidden_states=A )
a = (
our_outputs.logits if isinstance(A , A ) else our_outputs.last_hidden_state
)
a = from_model(A )
a = from_output[-1] if type(A ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
a = our_outputs.hidden_states[-1]
assert torch.allclose(A , A ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=A , )
a = 224 if "seer" not in name else 384
# we can use the convnext one
a = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=A )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=A , )
print(f'''Pushed {name}''' )
def a( A : Path , A : str = None , A : bool = True ) -> Dict:
"""simple docstring"""
a = "imagenet-1k-id2label.json"
a = 1000
a = (1, num_labels)
a = "huggingface/label-files"
a = num_labels
a = json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
a = {int(A ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
a = partial(A , num_labels=A , idalabel=A , labelaid=A )
a = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
a = NameToOurModelFuncMap()
a = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(A : str , A : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
a = torch.hub.load_state_dict_from_url(A , model_dir=str(A ) , map_location="cpu" )
a = model_func()
# check if we have a head, if yes add it
a = files["classy_state_dict"]["base_model"]["model"]
a = model_state_dict["trunk"]
model.load_state_dict(A )
return model.eval(), model_state_dict["heads"]
# pretrained
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
A , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , A , A , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
A , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , A , A , A , )
return config, expected_shape
if __name__ == "__main__":
_lowercase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowercase: Optional[int] = parser.parse_args()
_lowercase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 71 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase : int = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 233 |
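# Editor's sketch of the deferred-import idea behind the `_LazyModule` pattern above
# (illustrative only; the real implementation lives in transformers.utils). PEP 562's
# module-level __getattr__ lets a package resolve names on first access rather than at
# import time; `_SUBMODULES` here is a hypothetical mapping, not the library's API:
import importlib

_SUBMODULES = {"SqueezeBertConfig": "configuration_squeezebert"}

def __getattr__(name):
    # Lazily import the owning submodule, then fetch the requested attribute from it.
    if name in _SUBMODULES:
        module = importlib.import_module("." + _SUBMODULES[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")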
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover of `graph` via a maximal matching."""
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both its endpoints to chosen_vertices and then
    # remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) edges of the graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 333 | 0 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    """Check that the installed version of `pkg` satisfies the pin recorded in `deps`."""
    require_version(deps[pkg], hint)
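# Editor's usage sketch (the hint text is illustrative): require_version raises when the
# installed package violates the pinned requirement looked up in `deps`.
def check_tokenizers_pin_example():
    # Illustrative only; "tokenizers" is one of the runtime-checked keys of `deps` above.
    dep_version_check("tokenizers", hint="try: pip install -U tokenizers")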
| 359 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class __UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = WavaVecaPhonemeCTCTokenizer
test_rust_tokenizer = False
def SCREAMING_SNAKE_CASE__ (self : Tuple):
super().setUp()
A = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" ")
A = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE))))
A = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE) + "\n")
def SCREAMING_SNAKE_CASE__ (self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[Any]=2_0 , __SCREAMING_SNAKE_CASE : Any=5):
A = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE)) for i in range(len(__SCREAMING_SNAKE_CASE))]
A = list(filter(lambda __SCREAMING_SNAKE_CASE: [t[0]] == tokenizer.encode(t[1] , do_phonemize=__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE))
if max_length is not None and len(__SCREAMING_SNAKE_CASE) > max_length:
A = toks[:max_length]
if min_length is not None and len(__SCREAMING_SNAKE_CASE) < min_length and len(__SCREAMING_SNAKE_CASE) > 0:
while len(__SCREAMING_SNAKE_CASE) < min_length:
A = toks + toks
# toks_str = [t[1] for t in toks]
A = [t[0] for t in toks]
# Ensure consistency
A = tokenizer.decode(__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE)
if " " not in output_txt and len(__SCREAMING_SNAKE_CASE) > 1:
A = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE)
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE)
)
if with_prefix_space:
A = " " + output_txt
A = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE)
return output_txt, output_ids
def SCREAMING_SNAKE_CASE__ (self : List[Any] , **__SCREAMING_SNAKE_CASE : Any):
kwargs.update(self.special_tokens_map)
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : Optional[Any]):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
# check adding a single token
tokenizer.add_tokens("xxx")
A = tokenizer("m xxx ɪ" , do_phonemize=__SCREAMING_SNAKE_CASE).input_ids
self.assertEqual(__SCREAMING_SNAKE_CASE , [1_3, 3_9_2, 1_7]) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"])
A = tokenizer("m aaa ɪ ccc" , do_phonemize=__SCREAMING_SNAKE_CASE).input_ids
self.assertEqual(__SCREAMING_SNAKE_CASE , [1_3, 3_9_3, 1_7, 3_9_5]) # aaa and ccc should be after xxx and 2 after aaa
A = tokenizer("maɪ c" , do_phonemize=__SCREAMING_SNAKE_CASE).input_ids
self.assertEqual(__SCREAMING_SNAKE_CASE , [3, 2_0_0]) # mai should be <unk> (=3)
def SCREAMING_SNAKE_CASE__ (self : Tuple):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
self.assertEqual(__SCREAMING_SNAKE_CASE , "h ə l oʊ h aʊ ɑːɹ j uː")
def SCREAMING_SNAKE_CASE__ (self : List[str]):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
self.assertEqual(tokenizer(__SCREAMING_SNAKE_CASE).input_ids , tokenizer(__SCREAMING_SNAKE_CASE , do_phonemize=__SCREAMING_SNAKE_CASE).input_ids)
def SCREAMING_SNAKE_CASE__ (self : Any):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
A = tokenizer.decode(tokenizer(__SCREAMING_SNAKE_CASE).input_ids)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : str):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
A = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
]
A = tokenizer.decode(sample_ids[0])
A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , batch_tokens[0])
self.assertEqual(__SCREAMING_SNAKE_CASE , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
def SCREAMING_SNAKE_CASE__ (self : List[str]):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
self.assertEqual(__SCREAMING_SNAKE_CASE , "h ə l oʊ | h aʊ | ɑːɹ | j uː |")
def SCREAMING_SNAKE_CASE__ (self : str):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
self.assertEqual(tokenizer(__SCREAMING_SNAKE_CASE).input_ids , tokenizer(__SCREAMING_SNAKE_CASE , do_phonemize=__SCREAMING_SNAKE_CASE).input_ids)
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
# fmt: off
A = [
[1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
[tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
]
# fmt: on
# decode with word_del_token filter
A = tokenizer.decode(sample_ids[0])
A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , batch_tokens[0])
self.assertEqual(__SCREAMING_SNAKE_CASE , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
# decode with no word_del_token filter
A = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE)
A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , batch_tokens[0])
self.assertEqual(__SCREAMING_SNAKE_CASE , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
def SCREAMING_SNAKE_CASE__ (self : Dict):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
A = tokenizer.decode(tokenizer(__SCREAMING_SNAKE_CASE).input_ids , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : List[Any]):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|")
tokenizer.add_tokens("|")
A = "Hello how are you"
A = tokenizer.phonemize(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us")
A = tokenizer.decode(tokenizer(__SCREAMING_SNAKE_CASE).input_ids , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE)
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip() , __SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : Dict):
A = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=__SCREAMING_SNAKE_CASE)
A = "Hello how are you"
A = tokenizer(__SCREAMING_SNAKE_CASE , phonemizer_lang="en-us").input_ids
A = tokenizer(__SCREAMING_SNAKE_CASE , phonemizer_lang="fr-fr").input_ids
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
A = tokenizer.decode(__SCREAMING_SNAKE_CASE)
A = tokenizer.decode(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , "h ə l oʊ h aʊ ɑːɹ j uː")
self.assertEqual(__SCREAMING_SNAKE_CASE , "ɛ l o h aʊ a ʁ j u")
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any]):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
A = "Hello how Are you"
A = "hello how are you"
A = tokenizer(__SCREAMING_SNAKE_CASE).input_ids
A = tokenizer(__SCREAMING_SNAKE_CASE).input_ids
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any]):
A = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
tokenizer.add_tokens(["!", "?"])
tokenizer.add_special_tokens({"cls_token": "$$$"})
# fmt: off
A = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4],
]
# fmt: on
A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
@staticmethod
def SCREAMING_SNAKE_CASE__ (__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]):
A = [d[key] for d in offsets]
return retrieved_list
def SCREAMING_SNAKE_CASE__ (self : Optional[Any]):
A = self.get_tokenizer(word_delimiter_token="|")
tokenizer.add_tokens("|")
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
A = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8]
# fmt: on
A = tokenizer.decode(__SCREAMING_SNAKE_CASE , output_char_offsets=__SCREAMING_SNAKE_CASE , filter_word_delimiter_token=__SCREAMING_SNAKE_CASE)
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys()) , 2)
self.assertTrue("text" in outputs)
self.assertTrue("char_offsets" in outputs)
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char")) , outputs.text)
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char") , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"])
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset") , [0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6])
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset") , [1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7])
def SCREAMING_SNAKE_CASE__ (self : Any):
A = self.get_tokenizer(word_delimiter_token="|")
def check_list_tuples_equal(__SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any]):
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
self.assertTrue(isinstance(outputs_list[0] , __SCREAMING_SNAKE_CASE))
# transform list to ModelOutput
A = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]})
self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"])
def recursive_check(__SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any]):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
[recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for la, la in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"])
# fmt: off
A = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4],
[2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , output_char_offsets=__SCREAMING_SNAKE_CASE)
A = [tokenizer.decode(__SCREAMING_SNAKE_CASE , output_char_offsets=__SCREAMING_SNAKE_CASE) for ids in sample_ids]
check_list_tuples_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
def SCREAMING_SNAKE_CASE__ (self : Dict):
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
def SCREAMING_SNAKE_CASE__ (self : str):
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
pass
def SCREAMING_SNAKE_CASE__ (self : List[str]):
A = self.get_tokenizers(do_lower_case=__SCREAMING_SNAKE_CASE)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
A = tokenizer.vocab_size
A = len(__SCREAMING_SNAKE_CASE)
self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A = ["aaaaa bbbbbb", "cccccccccdddddddd"]
A = tokenizer.add_tokens(__SCREAMING_SNAKE_CASE)
A = tokenizer.vocab_size
A = len(__SCREAMING_SNAKE_CASE)
self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE))
self.assertEqual(__SCREAMING_SNAKE_CASE , all_size + len(__SCREAMING_SNAKE_CASE))
A = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__SCREAMING_SNAKE_CASE)
self.assertGreaterEqual(len(__SCREAMING_SNAKE_CASE) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
A = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
A = tokenizer.add_special_tokens(__SCREAMING_SNAKE_CASE)
A = tokenizer.vocab_size
A = len(__SCREAMING_SNAKE_CASE)
self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE))
self.assertEqual(__SCREAMING_SNAKE_CASE , all_size_a + len(__SCREAMING_SNAKE_CASE))
A = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__SCREAMING_SNAKE_CASE)
self.assertGreaterEqual(len(__SCREAMING_SNAKE_CASE) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def SCREAMING_SNAKE_CASE__ (self : List[str]):
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def SCREAMING_SNAKE_CASE__ (self : List[Any]):
pass
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
A = self.get_tokenizers(fast=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
A = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
A = tokenizer.convert_tokens_to_string(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(output["text"] , __SCREAMING_SNAKE_CASE)
| 57 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace `original_name` in `key`, shifting the block index down by `offset`."""
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key
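# Editor's worked example for replace_key_with_offset (the key string is hypothetical):
# with offset=1, block index 2 shifts down to 1 and the layer name is swapped.
def _demo_replace_key_with_offset():
    renamed = replace_key_with_offset('poolformer.encoder.2.3.mlp.fc1.weight', 1, 'mlp.fc1', 'output.conv1')
    assert renamed == 'poolformer.encoder.block.1.3.output.conv1.weight'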
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if 'proj' in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and 'patch_embed' not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if 'patch_embeddings' in key:
            key = 'poolformer.encoder.' + key
        if 'mlp.fc1' in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if 'mlp.fc2' in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if 'norm1' in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if 'norm2' in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if 'layer_scale_1' in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if 'layer_scale_2' in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if 'head' in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == 's12':
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == 's24':
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == 's36':
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == 'm36':
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == 'm48':
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported')

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info(f'Converting model {model_name}...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == 's12':
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == 's24':
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == 's36':
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == 'm36':
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == 'm48':
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f'Size {size} not supported')

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
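# Editor's usage sketch (file name and paths are hypothetical):
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_hf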
| 259 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print('The following activities are selected:')
    # The first activity is always selected
    i = 0
    print(i, end=',')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
start = [1, 3, 0, 5, 8, 5]
finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
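# Editor's note (sketch): the greedy selection above assumes activities are already
# sorted by ascending finish time; with the sample arrays it prints "0,1,3,4,".
# For unsorted input, order the activities by finish time first, e.g.:
def sorted_max_activities(start: list[int], finish: list[int]) -> None:
    # Hypothetical wrapper: sort indices by finish time, then reuse the greedy pass.
    order = sorted(range(len(finish)), key=lambda j: finish[j])
    print_max_activities([start[j] for j in order], [finish[j] for j in order])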
| 259 | 1 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt', )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = 'pil',
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.' )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
                    f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == 'pil':
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (in log space) all classes outside the smallest set whose cumulative probability reaches `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
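# Editor's numeric sketch of the truncation step above (standalone, illustrative): a
# class at rank r survives when the cumulative probability through rank r-1 is still
# below truncation_rate (the top-1 class always survives). With probabilities
# [0.7, 0.2, 0.1] and truncation_rate=0.8, cumulative sums are [0.7, 0.9, 1.0], so the
# first two classes are kept and the third is set to log(0) = -inf.
import torch

def truncate_sketch(log_p, truncation_rate=0.9):
    # 2-D variant of the pipeline's truncate: rows are batch items, columns are classes.
    sorted_log_p, indices = torch.sort(log_p, dim=1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    keep = torch.cat([torch.ones_like(keep[:, :1]), keep[:, :-1]], dim=1)  # top-1 always kept
    keep = keep.gather(1, indices.argsort(1))
    out = log_p.clone()
    out[~keep] = float('-inf')
    return out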
| 308 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = BlenderbotSmallTokenizer
test_rust_tokenizer = False
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
__a =['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
__a =dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__a =['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
__a ={'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def __magic_name__ ( self , **__snake_case ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def __magic_name__ ( self , __snake_case ) -> List[Any]:
'''simple docstring'''
__a ='adapt act apte'
__a ='adapt act apte'
return input_text, output_text
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a ='adapt act apte'
__a =['adapt', 'act', 'ap@@', 'te']
__a =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__a =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
__a ='I am a small frog.'
__a =tok([src_text] , padding=__snake_case , truncation=__snake_case )['input_ids']
__a =tok.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
__a ='I am a small frog .'
__a ='.'
__a =tok(__snake_case )['input_ids']
__a =tok(__snake_case )['input_ids']
assert encoded[-1] == encoded_dot[0]
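# Editor's sketch of the BPE merge loop the fixture vocabulary above exercises
# (standalone, illustrative; not the tokenizer's real implementation). For "apte":
# "a p" merges first, then "t e</w>", giving ["ap", "te</w>"], i.e. the tokens
# "ap@@" and "te" expected in the full-tokenizer test.
def bpe_sketch(word, ranked_merges):
    symbols = list(word[:-1]) + [word[-1] + '</w>']
    while True:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        candidates = [p for p in pairs if ' '.join(p) in ranked_merges]
        if not candidates:
            return symbols
        best = min(candidates, key=lambda p: ranked_merges[' '.join(p)])
        i = pairs.index(best)
        symbols[i : i + 2] = [best[0] + best[1]]

# bpe_sketch('apte', {'a p': 0, 't e</w>': 1, 'ap t</w>': 2}) == ['ap', 'te</w>']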
| 308 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[str]:
"""simple docstring"""
A : Tuple = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE ):
A : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class TFMobileBertModelTester:
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> str:
"""simple docstring"""
A : List[str] = parent
A : str = batch_size
A : Optional[Any] = seq_length
A : List[str] = is_training
A : List[Any] = use_input_mask
A : Optional[Any] = use_token_type_ids
A : Optional[Any] = use_labels
A : List[str] = vocab_size
A : Dict = hidden_size
A : Union[str, Any] = num_hidden_layers
A : Tuple = num_attention_heads
A : Dict = intermediate_size
A : Tuple = hidden_act
A : List[Any] = hidden_dropout_prob
A : Tuple = attention_probs_dropout_prob
A : int = max_position_embeddings
A : int = type_vocab_size
A : str = type_sequence_label_size
A : int = initializer_range
A : Optional[Any] = num_labels
A : Optional[int] = num_choices
A : Tuple = scope
A : Dict = embedding_size
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Union[str, Any] = None
if self.use_input_mask:
A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
A : Dict = None
if self.use_token_type_ids:
A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A : Tuple = None
A : str = None
A : Any = None
if self.use_labels:
A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
A : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : int = TFMobileBertModel(config=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : List[str] = model(SCREAMING_SNAKE_CASE )
A : str = [input_ids, input_mask]
A : List[str] = model(SCREAMING_SNAKE_CASE )
A : Dict = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = TFMobileBertForMaskedLM(config=SCREAMING_SNAKE_CASE )
A : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Dict = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE )
A : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : str = TFMobileBertForPreTraining(config=SCREAMING_SNAKE_CASE )
A : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : str = self.num_labels
A : Tuple = TFMobileBertForSequenceClassification(config=SCREAMING_SNAKE_CASE )
A : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Dict = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : Dict = self.num_choices
A : Dict = TFMobileBertForMultipleChoice(config=SCREAMING_SNAKE_CASE )
A : Tuple = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : int = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : Union[str, Any] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : Tuple = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Dict = self.num_labels
A : Optional[Any] = TFMobileBertForTokenClassification(config=SCREAMING_SNAKE_CASE )
A : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : Union[str, Any] = TFMobileBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
A : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Any = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[int] = TFMobileBertModelTester(self )
A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
A : Optional[int] = TFMobileBertModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_tf
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Optional[Any] = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
A : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
A : str = model(SCREAMING_SNAKE_CASE )[0]
A : Dict = [1, 6, 30522]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Union[str, Any] = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
| 3 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_spark():
    with parallel_backend('spark'):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend('unsupported backend'):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend('unsupported backend'):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' ,[2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {'a': 1, 'b': 2}
    s3 = {'a': [1, 2], 'b': [3, 4]}
    s4 = {'a': {'1': 1}, 'b': 2}
    s5 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'a': 2, 'b': 3}
    expected_map_nested_s3 = {'a': [2, 3], 'b': [4, 5]}
    expected_map_nested_s4 = {'a': {'1': 2}, 'b': 3}
    expected_map_nested_s5 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}

    with parallel_backend('spark'):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
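# Editor's usage sketch for the spark-backed parallelism tested above (illustrative;
# requires pyspark and joblibspark installed). Functions passed to map_nested must be
# picklable, hence module-level:
def square(x):
    return x * x

# with parallel_backend('spark'):
#     assert map_nested(square, {'a': [1, 2]}, num_proc=2) == {'a': [1, 4]}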
| 209 | 0 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between two prompts, plus its location and the file's lines."""
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return ''.join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a CamelCase identifier into its component words."""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """Center `text` in a cell of the given `width` (the emoji count as two characters)."""
    text_length = 2 if text == '✅' or text == '❌' else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return ' ' * left_indent + text + ' ' * right_indent
def get_model_table_from_auto_modules():
    """Build the model table of the doc index from the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite) | 151 |
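As a quick sanity check of the two helpers above, here is a short usage sketch; both expected outputs follow directly from the regex and the centering arithmetic, so nothing here is assumed beyond the code itself:

# Usage sketch for camel_case_split and _center_text.
assert camel_case_split("TFBertForConditionalGeneration") == ["TF", "Bert", "For", "Conditional", "Generation"]
assert _center_text("ok", 6) == "  ok  "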
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Zero-shot image classification pipeline using models trained for image-text matching."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result | 151 | 1 |
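A minimal usage sketch for the pipeline above; the checkpoint name and candidate labels are illustrative assumptions, while `pipeline` itself is the standard transformers entry point:

# Hedged usage sketch: model name and labels are assumptions, not part of the file above.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["a photo of cats", "a photo of a dog"],
)
print(predictions)  # list of {"score": ..., "label": ...} dicts, sorted by score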
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape) | 145 |
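The `make_divisible` helper imported by the tester rounds a channel count to a hardware-friendly multiple. A sketch of the usual MobileNet-style rule follows; it is an assumption that the imported helper matches it exactly:

# Sketch of the MobileNet-style make_divisible rule (assumption: mirrors the imported helper).
def make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure rounding down does not go below 90% of the original value.
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value

assert make_divisible_sketch(512 * 0.25, divisor=8) == 128  # matches last_hidden_size in the tester above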
"""Bead sort (gravity sort) for sequences of non-negative integers."""


def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9] | 112 | 0 |
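A short trace clarifies why the pairwise transfers sort the list: each pass lets surplus "beads" fall one rod toward the end, so at most len(sequence) passes are needed.

# Trace of bead_sort passes on [5, 4, 3, 2, 1].
data = [5, 4, 3, 2, 1]
for step in range(len(data)):
    for i, (upper, lower) in enumerate(zip(data, data[1:])):
        if upper > lower:
            diff = upper - lower
            data[i] -= diff
            data[i + 1] += diff
    print(step, data)
# step 0 -> [4, 3, 2, 1, 5], ..., final -> [1, 2, 3, 4, 5]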
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!")

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 370 |
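A short usage sketch exercising the rope_scaling validation above (no checkpoint is downloaded, only the config class defined in this file is used):

# Usage sketch for the rope_scaling validation.
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
try:
    GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)  # type field must be one of ['linear', 'dynamic']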
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 172 | 0 |
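A brief usage sketch of the attribute map above: the canonical config names and the GPT-2-style field names refer to the same values, which `PretrainedConfig` resolves through `attribute_map`.

# Usage sketch: attribute_map aliases canonical names onto the GPT-2-style fields.
config = ImageGPTConfig()
assert config.hidden_size == config.n_embd == 512
assert config.max_position_embeddings == config.n_positions == 1024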
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        # Only record leaf modules (plus conv/batchnorm layers that may still have children).
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # Keep only the traced operations that actually hold learnable parameters.
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by tracing a forward pass with `x` in both modules."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


class FakeRegNetVisslWrapper(nn.Module):
    """Fake wrapper for RegNet that mimics what vissl does without the classification head."""

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """A dictionary with some additional logic to return the function that creates the correct original model."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """A dictionary with some additional logic to return the correct Hugging Face RegNet class reference."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict


def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True,
        )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True,
        )
        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 85 |
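A hedged invocation sketch, using only the flags registered above; the script filename is an assumption, and the model name must be one of the keys of `names_to_config`:

# Assumed filename; run from the repository root.
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./converted-regnet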
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 85 | 1 |
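Assuming the standard `diffusers-cli` entry point wires up `register_subcommand` (it does in diffusers' CLI), the command above is used like this, with output abridged:

# $ diffusers-cli env
# - `diffusers` version: <installed version>
# - Platform: <platform string>
# - Python version: <python version>
# ...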
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")
    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 119 | 0 |
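The `_LazyModule` registered above defers the heavy torch/TF imports until a symbol is first accessed. A minimal sketch of the idea follows; it is an assumption that this simplification captures the real `_LazyModule`, which is considerably more elaborate:

# Minimal sketch of attribute-triggered lazy imports (simplified vs. _LazyModule by assumption).
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, name):
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        value = getattr(module, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value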
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
@require_torch
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : List[str] = tokenizer(text_target=_snake_case ,max_length=32 ,padding='''max_length''' ,return_tensors='''pt''' )
self.assertEqual(32 ,targets['''input_ids'''].shape[1] )
@require_torch
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Tuple = tokenizer(
['''I am a small frog''' * 1_024, '''I am a small frog'''] ,padding=_snake_case ,truncation=_snake_case ,return_tensors='''pt''' )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(batch.input_ids.shape ,(2, 1_024) )
@require_torch
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = ['''A long paragraph for summarization.''']
lowercase__ : Union[str, Any] = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Any = tokenizer(_snake_case ,text_target=_snake_case ,return_tensors='''pt''' )
lowercase__ : str = inputs['''input_ids''']
lowercase__ : Union[str, Any] = inputs['''labels''']
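# the tokenizer should frame both the encoder inputs and the labels with bos/eos tokens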
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_snake_case ,**_snake_case )
lowercase__ : Tuple = self.tokenizer_class.from_pretrained(_snake_case ,**_snake_case )
lowercase__ : Tuple = '''A, <mask> AllenNLP sentence.'''
lowercase__ : List[str] = tokenizer_r.encode_plus(_snake_case ,add_special_tokens=_snake_case ,return_token_type_ids=_snake_case )
lowercase__ : Dict = tokenizer_p.encode_plus(_snake_case ,add_special_tokens=_snake_case ,return_token_type_ids=_snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) ,sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) ,sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) ,)
lowercase__ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
_snake_case ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_snake_case ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 16 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : int = num_channels
lowercase__ : Any = embed_dim
lowercase__ : int = depths
lowercase__ : Dict = num_heads
lowercase__ : List[Any] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Optional[int] = qkv_bias
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Dict = drop_path_rate
lowercase__ : int = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Tuple = patch_norm
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[Any] = initializer_range
lowercase__ : int = is_training
lowercase__ : Optional[int] = scope
lowercase__ : str = use_labels
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Union[str, Any] = encoder_stride
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
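# each patch-merging stage shrinks the sequence length by 4x while doubling the channel dim, hence the expected shape below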
lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Dict = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase : Optional[int] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SwinvaModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = True
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Dict = outputs.attentions
lowercase__ : Any = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) ,_snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = config.window_size**2
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowercase__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
lowercase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) )
lowercase__ : Optional[int] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) ,_snake_case )
# Swinv2 has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowercase__ : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape
lowercase__ : int = (
reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
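# expected spatial sizes once the model pads inputs up to the next multiple of the patch size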
lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = _config_zero_init(_snake_case )
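# with a zeroed-out initializer, properly initialized weights should have mean 0.0 (or 1.0 for scale params)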
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
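# spot-check the first three logits against precomputed reference values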
lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
| 16 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a__ = get_tests_dir('''fixtures''')
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
_a : Dict = mock.Mock()
_a : List[Any] = 5_0_0
_a : Optional[Any] = {}
_a : Any = HTTPError
_a : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
_a : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_a ) as mock_head:
_a : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check ensures we did call the fake head request
mock_head.assert_called()
def __lowercase ( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
_a : List[str] = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls ) -> Tuple:
_a : Optional[int] = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls ) -> int:
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def __lowercase ( self ) -> str:
_a : int = WavaVecaFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
_a : List[Any] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_a , repo_id='''test-feature-extractor''' , push_to_hub=_a , use_auth_token=self._token )
_a : Dict = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def __lowercase ( self ) -> List[Any]:
_a : int = WavaVecaFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
_a : List[str] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_a , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=_a , use_auth_token=self._token )
_a : str = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def __lowercase ( self ) -> Union[str, Any]:
CustomFeatureExtractor.register_for_auto_class()
_a : Union[str, Any] = CustomFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
_a : str = AutoFeatureExtractor.from_pretrained(
F"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=_a )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 15 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
"""simple docstring"""
if rng is None:
_a : Dict = global_rng
_a : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
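# even step between the min and max lengths so successive batch elements strictly grow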
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
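# the extractor returns a (batch, channels, time, mel_bins) spectrogram for this sample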
self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 1 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :List[str]=None ) -> int:
'''simple docstring'''
if "." in tensor_name:
lowercase = tensor_name.split(""".""" )
for split in splits[:-1]:
lowercase = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
lowercase = new_module
lowercase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
lowercase = tensor_name in module._buffers
lowercase = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
lowercase = False
lowercase = False
if is_buffer or not is_bitsandbytes_available():
lowercase = False
lowercase = False
else:
lowercase = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
lowercase = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
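# detect whether the existing parameter is a bitsandbytes 4-bit or 8-bit quantized weight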
if is_abit or is_abit:
lowercase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowercase = old_value.to(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , torch.Tensor ):
lowercase = value.to("""cpu""" )
if value.dtype == torch.inta:
lowercase = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
lowercase = torch.tensor(lowerCAmelCase__ , device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , lowerCAmelCase__ ) and fpaa_statistics is None:
lowercase = new_value.T
lowercase = old_value.__dict__
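# re-wrap the tensor with the matching bitsandbytes parameter class before moving it to the target device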
if is_abit:
lowercase = bnb.nn.IntaParams(lowerCAmelCase__ , requires_grad=lowerCAmelCase__ , **lowerCAmelCase__ ).to(lowerCAmelCase__ )
elif is_abit:
lowercase = bnb.nn.Paramsabit(lowerCAmelCase__ , requires_grad=lowerCAmelCase__ , **lowerCAmelCase__ ).to(lowerCAmelCase__ )
lowercase = new_value
if fpaa_statistics is not None:
setattr(module.weight , """SCB""" , fpaa_statistics.to(lowerCAmelCase__ ) )
else:
if value is None:
lowercase = old_value.to(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , torch.Tensor ):
lowercase = value.to(lowerCAmelCase__ )
else:
lowercase = torch.tensor(lowerCAmelCase__ , device=lowerCAmelCase__ )
if is_buffer:
lowercase = new_value
else:
lowercase = nn.Parameter(lowerCAmelCase__ , requires_grad=old_value.requires_grad )
lowercase = new_value
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :Dict=False ) -> Dict:
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
lowercase = []
current_key_name.append(lowerCAmelCase__ )
if (isinstance(lowerCAmelCase__ , nn.Linear ) or isinstance(lowerCAmelCase__ , ConvaD )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(lowerCAmelCase__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(lowerCAmelCase__ , ConvaD ):
lowercase , lowercase = module.weight.shape
else:
lowercase = module.in_features
lowercase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowercase = bnb.nn.LinearabitLt(
lowerCAmelCase__ , lowerCAmelCase__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
lowercase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
lowercase = bnb.nn.Linearabit(
lowerCAmelCase__ , lowerCAmelCase__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
lowercase = True
# Store the module class in case we need to transpose the weight later
lowercase = type(lowerCAmelCase__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(lowerCAmelCase__ )
if len(list(module.children() ) ) > 0:
lowercase , lowercase = _replace_with_bnb_linear(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , has_been_replaced=lowerCAmelCase__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :int=None ) -> Tuple:
'''simple docstring'''
lowercase = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
lowercase , lowercase = _replace_with_bnb_linear(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def UpperCAmelCase__ ( *lowerCAmelCase__ :int , **lowerCAmelCase__ :Tuple ) -> int:
'''simple docstring'''
warnings.warn(
"""`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , lowerCAmelCase__ , )
return replace_with_bnb_linear(*lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCAmelCase__ ( *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :Optional[int] ) -> str:
'''simple docstring'''
warnings.warn(
"""`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , lowerCAmelCase__ , )
return set_module_quantized_tensor_to_device(*lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
lowercase = deepcopy(lowerCAmelCase__ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
lowercase = find_tied_parameters(lowerCAmelCase__ )
# For compatibility with Accelerate < 0.18
if isinstance(lowerCAmelCase__ , dict ):
lowercase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase = sum(lowerCAmelCase__ , [] )
lowercase = len(lowerCAmelCase__ ) > 0
# Check if it is a base model
lowercase = not hasattr(lowerCAmelCase__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase = list(model.named_children() )
lowercase = [list_modules[-1][0]]
# add last module together with tied weights
lowercase = set(lowerCAmelCase__ ) - set(lowerCAmelCase__ )
lowercase = list(set(lowerCAmelCase__ ) ) + list(lowerCAmelCase__ )
# remove ".weight" from the keys
lowercase = [""".weight""", """.bias"""]
lowercase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase = name.replace(lowerCAmelCase__ , """""" )
filtered_module_names.append(lowerCAmelCase__ )
return filtered_module_names
| 197 | """simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Path , lowerCAmelCase__ :str = None , lowerCAmelCase__ :str = None , lowerCAmelCase__ :str = None , ) -> Optional[int]:
'''simple docstring'''
if config_name_or_path is None:
lowercase = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base"""
if generator_tokenizer_name_or_path is None:
lowercase = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowercase = question_encoder_name_or_path
lowercase = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration
# Save model.
lowercase = RagConfig.from_pretrained(lowerCAmelCase__ )
lowercase = AutoConfig.from_pretrained(lowerCAmelCase__ )
lowercase = AutoConfig.from_pretrained(lowerCAmelCase__ )
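# wire the generator and question-encoder configs into the composite RAG config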
lowercase = gen_config
lowercase = question_encoder_config
lowercase = model_class.from_pretrained_question_encoder_generator(
lowerCAmelCase__ , lowerCAmelCase__ , config=lowerCAmelCase__ )
rag_model.save_pretrained(lowerCAmelCase__ )
# Sanity check.
model_class.from_pretrained(lowerCAmelCase__ )
# Save tokenizers.
lowercase = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" )
lowercase = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" )
if __name__ == "__main__":
__lowerCAmelCase : int =argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
__lowerCAmelCase : List[str] =parser.parse_args()
__lowerCAmelCase : Dict =Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 197 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( _lowerCamelCase , _lowerCamelCase):
A_ : Dict = 'maskformer-swin'
A_ : str = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
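# attribute_map lets generic names like num_attention_heads resolve to this config's Swin-style fields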
def __init__( self , _SCREAMING_SNAKE_CASE=2_24 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=96 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = image_size
__lowerCAmelCase : str = patch_size
__lowerCAmelCase : List[Any] = num_channels
__lowerCAmelCase : Optional[Any] = embed_dim
__lowerCAmelCase : List[str] = depths
__lowerCAmelCase : List[str] = len(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = num_heads
__lowerCAmelCase : Optional[Any] = window_size
__lowerCAmelCase : Tuple = mlp_ratio
__lowerCAmelCase : Optional[int] = qkv_bias
__lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
__lowerCAmelCase : Tuple = attention_probs_dropout_prob
__lowerCAmelCase : Tuple = drop_path_rate
__lowerCAmelCase : int = hidden_act
__lowerCAmelCase : List[Any] = use_absolute_embeddings
__lowerCAmelCase : List[str] = layer_norm_eps
__lowerCAmelCase : Tuple = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCAmelCase : Optional[Any] = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE ) - 1) )
__lowerCAmelCase : int = ['stem'] + [f"stage{idx}" for idx in range(1 , len(_SCREAMING_SNAKE_CASE ) + 1 )]
__lowerCAmelCase : Optional[int] = get_aligned_output_features_output_indices(
out_features=_SCREAMING_SNAKE_CASE , out_indices=_SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 364 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : str = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
__lowerCAmelCase : Union[str, Any] = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
__lowerCAmelCase : List[str] = F"{src_lang}-{tgt_lang}"
__lowerCAmelCase : Tuple = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
__lowerCAmelCase : Any = os.path.join(_UpperCamelCase , 'README.md' )
print(F"Generating {path}" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(_UpperCamelCase )
# make sure we are under the root of the project
lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent
lowerCamelCase__ = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split("""-""")
lowerCamelCase__ = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 182 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
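# the model classes below are only exposed when torch is available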
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 43 |
import math
import qiskit
def lowerCamelCase ( SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1 ):
'''simple docstring'''
if (
isinstance(SCREAMING_SNAKE_CASE , str )
or isinstance(SCREAMING_SNAKE_CASE , str )
or isinstance(SCREAMING_SNAKE_CASE , str )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
__UpperCamelCase :List[str] = qiskit.QuantumRegister(4 , '''qr''' )
__UpperCamelCase :str = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
__UpperCamelCase :Tuple = [input_a, input_a, carry_in]
__UpperCamelCase :Optional[int] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(SCREAMING_SNAKE_CASE ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(SCREAMING_SNAKE_CASE ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(SCREAMING_SNAKE_CASE ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , SCREAMING_SNAKE_CASE ) # measure the last two qubits
__UpperCamelCase :Optional[Any] = qiskit.Aer.get_backend('''aer_simulator''' )
__UpperCamelCase :Tuple = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1_000 )
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 43 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __lowerCAmelCase :
def __init__( self : int , A : str , A : Any=14 , A : Any=7 , A : int=True , A : Tuple=True , A : Optional[int]=True , A : Optional[Any]=True , A : Optional[int]=True , A : str=99 , A : List[Any]=32 , A : Dict=5 , A : List[str]=4 , A : Optional[Any]=37 , A : Optional[int]="gelu" , A : Union[str, Any]=0.1 , A : str=0.1 , A : Optional[int]=5_12 , A : Any=16 , A : List[Any]=2 , A : Union[str, Any]=0.0_2 , A : List[str]=3 , A : Any=4 , A : Optional[int]=None , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = use_mc_token_ids
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def _lowerCamelCase ( self : int) -> str:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = None
if self.use_mc_token_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length)
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCAmelCase = self.get_config()
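# random binary head mask, one entry per attention head in every layer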
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowerCamelCase ( self : str) -> str:
"""simple docstring"""
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _lowerCamelCase ( self : Optional[int] , A : Optional[int] , A : Dict , A : Tuple , A : Optional[Any] , A : Optional[int] , *A : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = CTRLModel(config=A)
model.to(A)
model.eval()
model(A , token_type_ids=A , head_mask=A)
model(A , token_type_ids=A)
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values) , config.n_layer)
def _lowerCamelCase ( self : List[Any] , A : str , A : Optional[Any] , A : int , A : Optional[int] , A : List[str] , *A : Tuple) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = CTRLLMHeadModel(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , token_type_ids=A , labels=A)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def create_and_check_ctrl_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
"""simple docstring"""
config.num_labels = self.num_labels
model = CTRLForSequenceClassification(config)
model.to(torch_device)
model.eval()
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
pipeline_model_mapping = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
test_pruning = True
test_resize_embeddings = False
test_head_masking = False
def is_pipeline_test_to_skip( self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ):
"""simple docstring"""
if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def setUp( self ):
"""simple docstring"""
self.model_tester = CTRLModelTester(self)
self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37)
def tearDown( self ):
"""simple docstring"""
super().tearDown()
# clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_ctrl_model( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
def test_ctrl_lm_head_model( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def _lowerCamelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
pass
@slow
def test_model_from_pretrained( self ):
"""simple docstring"""
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CTRLModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :)
def _lowerCamelCase ( self : List[str]) -> str:
"""simple docstring"""
pass
@require_torch
class CTRLModelLanguageGenerationTest( unittest.TestCase ):
def tearDown( self ):
"""simple docstring"""
super().tearDown()
# clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
@slow
def test_lm_generate_ctrl( self ):
"""simple docstring"""
model = CTRLLMHeadModel.from_pretrained('ctrl')
model.to(torch_device)
input_ids = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=torch_device)  # Legal the president is
expected_output_ids = [
11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
output_ids = model.generate(input_ids , do_sample=False)
self.assertListEqual(output_ids[0].tolist() , expected_output_ids)
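# Hedged usage sketch (added): the same greedy-decoding check as the slow test above, run outside
# the test harness. Assumes the public 'ctrl' checkpoint and the standard `generate` API; the
# helper name is ours, not part of the test suite.
from transformers import CTRLTokenizer

def generate_ctrl_continuation(prompt="Legal the president is"):
    tokenizer = CTRLTokenizer.from_pretrained("ctrl")
    model = CTRLLMHeadModel.from_pretrained("ctrl")
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
    return tokenizer.decode(output_ids[0])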
| 367 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["CLIPFeatureExtractor"]
UpperCAmelCase__ = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
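# Hedged usage sketch (added): with the lazy module above, `import transformers` stays cheap and
# heavy backends (torch/TF/flax) load only when an attribute is first touched. The checkpoint
# name below is the standard public CLIP checkpoint, assumed here purely for illustration:
#     from transformers import CLIPModel, CLIPProcessor  # resolved lazily on first access
#     model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")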
| 290 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest ):
scheduler_classes = (DEISMultistepScheduler,)
forward_default_kwargs = (("num_inference_steps", 25),)
def get_scheduler_config( self ,**kwargs ):
'''simple docstring'''
config = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**kwargs )
return config
def check_over_configs( self ,time_step=0 ,**config ):
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop('num_inference_steps' ,None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output, new_output = sample, sample
for t in range(time_step ,time_step + scheduler.config.solver_order + 1 ):
output = scheduler.step(residual ,t ,output ,**kwargs ).prev_sample
new_output = new_scheduler.step(residual ,t ,new_output ,**kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
def check_over_forward( self ,time_step=0 ,**forward_kwargs ):
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop('num_inference_steps' ,None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
new_output = new_scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def full_loop( self ,scheduler=None ,**config ):
'''simple docstring'''
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample ,t )
sample = scheduler.step(residual ,t ,sample ).prev_sample
return sample
def test_step_shape( self ):
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop('num_inference_steps' ,None )
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler ,'set_timesteps' ):
scheduler.set_timesteps(num_inference_steps )
elif num_inference_steps is not None and not hasattr(scheduler ,'set_timesteps' ):
scheduler.num_inference_steps = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual ,time_step_0 ,sample ,**kwargs ).prev_sample
output_1 = scheduler.step(residual ,time_step_1 ,sample ,**kwargs ).prev_sample
self.assertEqual(output_0.shape ,sample.shape )
self.assertEqual(output_0.shape ,output_1.shape )
def test_switch( self ):
'''simple docstring'''
# make sure that iterating over schedulers with same config names gives same results
# for defaults
scheduler = DEISMultistepScheduler(**self.get_scheduler_config() )
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
scheduler = DEISMultistepScheduler.from_config(scheduler.config )
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def test_timesteps( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=timesteps )
def test_thresholding( self ):
'''simple docstring'''
self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=True ,prediction_type=prediction_type ,sample_max_value=threshold ,algorithm_type='deis' ,solver_order=order ,solver_type=solver_type ,)
def test_prediction_type( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def test_solver_order_and_type( self ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=order ,solver_type=solver_type ,prediction_type=prediction_type ,algorithm_type=algorithm_type ,)
sample = self.full_loop(
solver_order=order ,solver_type=solver_type ,prediction_type=prediction_type ,algorithm_type=algorithm_type ,)
assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def test_lower_order_final( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=True )
self.check_over_configs(lower_order_final=False )
def test_inference_steps( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=num_inference_steps ,time_step=0 )
def test_full_loop_no_noise( self ):
'''simple docstring'''
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def test_full_loop_with_v_prediction( self ):
'''simple docstring'''
sample = self.full_loop(prediction_type='v_prediction' )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def test_fp16_support( self ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True ,dynamic_thresholding_ratio=0 )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample ,t )
sample = scheduler.step(residual ,t ,sample ).prev_sample
assert sample.dtype == torch.float16
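# Hedged usage sketch (added): the denoising loop the tests above exercise, run standalone.
# The 0.1 * sample "residual" is a stand-in for a real noise-prediction model, mirroring the
# dummy residuals used by the tests; the helper name is ours.
def run_deis_toy_loop(num_inference_steps=10):
    scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = 0.1 * sample  # stand-in for a real model's noise prediction
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample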
| 83 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset ):
"""simple docstring"""
def __init__( self , p_stop=0.01 , max_length=1000 ):
"""simple docstring"""
self.p_stop = p_stop
self.max_length = max_length
def __iter__( self ):
"""simple docstring"""
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
stop = random.random() < self.p_stop
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
"""simple docstring"""
batch_sampler_shards = [
BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
for i in range(2 )
]
batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
self.assertListEqual(batch_sampler_lists , expected )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowerCAmelCase : List[str] = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batch.
lowerCAmelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[int] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[int] = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase : Dict = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowerCAmelCase : str = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : List[str] = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batch.
lowerCAmelCase : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase : Tuple = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase : int = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : int = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
def test_batch_sampler_with_varying_batch_size( self ):
"""simple docstring"""
batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
batch_sampler_shards = [BatchSamplerShard(batch_sampler , 2 , i , even_batches=False ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def check_iterable_dataset_shards( self , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ):
"""simple docstring"""
random.seed(seed )
reference = list(dataset )
iterable_dataset_shards = [
IterableDatasetShard(
dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
for i in range(num_processes )
]
iterable_dataset_lists = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(seed )
iterable_dataset_lists.append(list(iterable_dataset_shard ) )
shard_batch_size = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
reference_list = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(l ) , len(reference_list ) )
self.assertTrue(len(l ) % shard_batch_size == 0 )
observed = []
for idx in range(0 , len(reference_list ) , shard_batch_size ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(reference ) < len(observed ):
reference += reference
self.assertListEqual(observed , reference[: len(observed )] )
def test_iterable_dataset_shard( self ):
"""simple docstring"""
seed = 42
dataset = RandomIterableDataset()
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
# Edge case with a very small dataset
dataset = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
def test_skip_batch_sampler( self ):
"""simple docstring"""
batch_sampler = BatchSampler(range(16 ) , batch_size=4 , drop_last=False )
new_batch_sampler = SkipBatchSampler(batch_sampler , 2 )
self.assertListEqual(list(new_batch_sampler ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def test_skip_data_loader( self ):
"""simple docstring"""
dataloader = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def test_skip_first_batches( self ):
"""simple docstring"""
dataloader = DataLoader(list(range(16 ) ) , batch_size=4 )
new_dataloader = skip_first_batches(dataloader , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def test_end_of_dataloader( self ):
"""simple docstring"""
dataloader = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def test_end_of_dataloader_dispatcher( self ):
"""simple docstring"""
Accelerator()
dataloader = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
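# Hedged usage sketch (added): `skip_first_batches` as exercised above, the building block for
# resuming a training run mid-epoch. Mirrors the public accelerate API; the helper name is ours.
def resume_mid_epoch_example():
    dataloader = DataLoader(list(range(16)), batch_size=4)
    resumed = skip_first_batches(dataloader, num_batches=2)
    return [batch.tolist() for batch in resumed]  # [[8, 9, 10, 11], [12, 13, 14, 15]]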
| 108 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ):
return EnvironmentCommand()
def download_command_factory(args ):
return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand(BaseTransformersCLICommand ):
@staticmethod
def register_subcommand(parser: ArgumentParser ):
download_parser = parser.add_parser("env" )
download_parser.set_defaults(func=info_command_factory )
download_parser.add_argument(
"--accelerate-config_file" , default=None , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=download_command_factory )
def __init__( self , accelerate_config_file , *args ):
self._accelerate_config_file = accelerate_config_file
def run( self ):
safetensors_version = "not installed"
if is_safetensors_available():
import safetensors
safetensors_version = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
safetensors_version = F"{safetensors.__version__} but is ignored because of PyTorch version too old."
accelerate_version = "not installed"
accelerate_config = accelerate_config_str = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
accelerate_version = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
accelerate_config_str = (
"\n".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(accelerate_config , dict )
else F"\t{accelerate_config}"
)
pt_version = "not installed"
pt_cuda_available = "NA"
if is_torch_available():
import torch
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
tf_version = "not installed"
tf_cuda_available = "NA"
if is_tf_available():
import tensorflow as tf
tf_version = tf.__version__
try:
# deprecated in v2.1
tf_cuda_available = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
tf_cuda_available = bool(tf.config.list_physical_devices("GPU" ) )
flax_version = "not installed"
jax_version = "not installed"
jaxlib_version = "not installed"
jax_backend = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
flax_version = flax.__version__
jax_version = jax.__version__
jaxlib_version = jaxlib.__version__
jax_backend = jax.lib.xla_bridge.get_backend().platform
info = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": F"{safetensors_version}",
"Accelerate version": F"{accelerate_version}",
"Accelerate config": F"{accelerate_config_str}",
"PyTorch version (GPU?)": F"{pt_version} ({pt_cuda_available})",
"Tensorflow version (GPU?)": F"{tf_version} ({tf_cuda_available})",
"Flax version (CPU?/GPU?/TPU?)": F"{flax_version} ({jax_backend})",
"Jax version": F"{jax_version}",
"JaxLib version": F"{jaxlib_version}",
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(info ) )
return info
@staticmethod
def format_dict( d ):
return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
| 360 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput ):
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(t ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"Unsupported alpha_transform_type: {alpha_transform_type}" )
betas = []
for i in range(num_diffusion_timesteps ):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ), max_beta ) )
return torch.tensor(betas, dtype=torch.float32 )
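# Note (added): the cosine branch of alpha_bar_fn above implements
#   alpha_bar(t) = cos^2(((t + 0.008) / 1.008) * (pi / 2))
# and the loop clips each per-step increment, beta_i = min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta),
# i.e. the cosine noise schedule of Nichol & Dhariwal (2021).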
class DDIMInverseScheduler(SchedulerMixin , ConfigMixin ):
order = 1
@register_to_config
def __init__( self , num_train_timesteps: int = 1000 , beta_start: float = 0.0001 , beta_end: float = 0.02 , beta_schedule: str = "linear" , trained_betas: Optional[Union[np.ndarray, List[float]]] = None , clip_sample: bool = True , set_alpha_to_zero: bool = True , steps_offset: int = 0 , prediction_type: str = "epsilon" , clip_sample_range: float = 1.0 , **kwargs , ):
if kwargs.get("set_alpha_to_one" , None ) is not None:
deprecation_message = (
"The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
)
deprecate("set_alpha_to_one" , "1.0.0" , deprecation_message , standard_warn=False )
set_alpha_to_zero = kwargs["set_alpha_to_one"]
if trained_betas is not None:
self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps )
else:
raise NotImplementedError(F"{beta_schedule} is not implemented for {self.__class__}" )
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
self.final_alpha_cumprod = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
self.init_noise_sigma = 1.0
# setable values
self.num_inference_steps = None
self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps ).copy().astype(np.int64 ) )
def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ):
return sample
def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
F" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
F" maximal {self.config.num_train_timesteps} timesteps." )
self.num_inference_steps = num_inference_steps
step_ratio = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round().copy().astype(np.int64 )
self.timesteps = torch.from_numpy(timesteps ).to(device )
self.timesteps += self.config.steps_offset
def step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , eta: float = 0.0 , use_clipped_model_output: bool = False , variance_noise: Optional[torch.FloatTensor] = None , return_dict: bool = True , ):
# 1. get previous step value (=t+1)
prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
alpha_prod_t = self.alphas_cumprod[timestep]
alpha_prod_t_prev = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
beta_prod_t = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
pred_epsilon = model_output
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
" `v_prediction`" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
pred_original_sample = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def __len__( self: List[str] ):
return self.config.num_train_timesteps
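# Hedged usage sketch (added): driving the scheduler above standalone. The zero "noise
# prediction" is a stand-in for a real UNet call; shapes are arbitrary toy values and the
# helper name is ours.
def run_inverse_ddim_toy_loop(num_inference_steps=50):
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(1, 3, 32, 32)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # stand-in for a real UNet
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample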
| 158 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def xgboost(features , target ) -> XGBClassifier:
"""simple docstring"""
classifier = XGBClassifier()
classifier.fit(features , target )
return classifier
def main() -> None:
"""simple docstring"""
iris = load_iris()
features, targets = data_handling(iris )
x_train, x_test, y_train, y_test = train_test_split(
features , targets , test_size=0.25 )
names = iris['target_names']
# Create an XGBoost Classifier from the training data
xgboost_classifier = xgboost(x_train , y_train )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
xgboost_classifier , x_test , y_test , display_labels=names , cmap='Blues' , normalize='true' , )
plt.title('Normalized Confusion Matrix - IRIS Dataset' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
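# Hedged extension (added): the same split as main(), plus plain accuracy on the held-out set.
# `accuracy_score` is standard scikit-learn; the helper name is ours and reuses the functions above.
def main_with_accuracy():
    from sklearn.metrics import accuracy_score

    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    model = xgboost(x_train, y_train)
    print(f"Test accuracy: {accuracy_score(y_test, model.predict(x_test)):.3f}")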
| 310 |
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module ):
def __init__( self , pretrained_model_name_or_path="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(FSNERModel , self ).__init__()
self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path , return_dict=True )
self.cos = torch.nn.CosineSimilarity(3 , 1E-08 )
self.softmax = torch.nn.Softmax(dim=1 )
def BERT( self , **inputs ):
'''simple docstring'''
return self.bert(**inputs ).last_hidden_state
def VectorSum( self , token_embeddings ):
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=True )
def Atten( self , q_rep , S_rep , T=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(q_rep , S_rep ) )
def forward( self , W_query , W_supports ):
'''simple docstring'''
support_sizes = W_supports['sizes'].tolist()
start_token_id = W_supports['start_token_id'].item()
end_token_id = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
q = self.BERT(**W_query )
S = self.BERT(**W_supports )
p_starts = None
p_ends = None
start_token_masks = W_supports['input_ids'] == start_token_id
end_token_masks = W_supports['input_ids'] == end_token_id
for i, size in enumerate(support_sizes ):
if i == 0:
s = 0
else:
s = support_sizes[i - 1]
s_start = S[s : s + size][start_token_masks[s : s + size]]
s_end = S[s : s + size][end_token_masks[s : s + size]]
p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
p_starts = torch.vstack((p_starts, p_start) )
p_ends = torch.vstack((p_ends, p_end) )
else:
p_starts = p_start
p_ends = p_end
return p_starts, p_ends
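# Hedged sketch (added): the temperature-scaled cosine-similarity head (the Atten method above)
# in isolation, written over the last dimension for clarity; the function name is ours.
def cosine_attention_scores(a, b, T=1.0):
    cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-08)
    softmax = torch.nn.Softmax(dim=-1)
    return softmax(T * cos(a, b))  # larger T sharpens the distribution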
| 310 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester :
"""simple docstring"""
def __init__( self , parent , batch_size=3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
'''simple docstring'''
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__lowerCamelCase , )
def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
'''simple docstring'''
model = FalconModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
__A : int = True
__A : Tuple = FalconModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__A : List[Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )
__A : Union[str, Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , )
__A : Optional[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
'''simple docstring'''
model = FalconForCausalLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
__A : List[Any] = True
__A : List[str] = True
__A : Dict = FalconForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# first forward pass
__A : Tuple = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase , )
__A : Optional[int] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__A : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
__A : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
__A : int = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )['''hidden_states'''][0]
__A : str = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )['''hidden_states'''][0]
# select random slice
__A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
__A : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
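        # Note: the check above compares cached vs. uncached decoding only on a
        # random hidden-dimension slice with atol=1e-3; exact full-tensor equality
        # is too strict because float accumulation order differs between the two paths.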
def UpperCamelCase__( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs  # names follow the standard ModelTester signature
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCamelCase = (FalconForCausalLM,) if is_torch_available() else ()
_lowerCamelCase = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Any = FalconModelTester(self )
__A : Dict = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def UpperCamelCase__( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
        config , *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config , *inputs )
def UpperCamelCase__( self ):
'''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = 3
__A : Dict = input_dict['''input_ids''']
__A : Optional[int] = input_ids.ne(1 ).to(__lowerCamelCase )
__A : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : List[Any] = FalconForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__A : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__( self ):
'''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : int = 3
__A : str = '''single_label_classification'''
__A : Any = input_dict['''input_ids''']
__A : Tuple = input_ids.ne(1 ).to(__lowerCamelCase )
__A : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Dict = FalconForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__A : Optional[int] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__( self ):
'''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = input_dict['''input_ids''']
__A : int = FalconForCausalLM(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__A : List[Any] = model(__lowerCamelCase , use_cache=__lowerCamelCase )
__A : List[str] = input_ids.shape[0]
__A : Tuple = model._convert_to_rw_cache(result.past_key_values )
__A : Union[str, Any] = model._convert_cache_to_standard_format(__lowerCamelCase , __lowerCamelCase )
for layer in range(len(__lowerCamelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def UpperCamelCase__( self ):
'''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Tuple = 3
__A : Dict = '''multi_label_classification'''
__A : Tuple = input_dict['''input_ids''']
__A : Any = input_ids.ne(1 ).to(__lowerCamelCase )
__A : Union[str, Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__A : Optional[Any] = FalconForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__A : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__( self ):
'''simple docstring'''
for model_class in self.all_generative_model_classes:
            config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__lowerCamelCase , '''use_cache''' ):
return
__A : int = model_class(__lowerCamelCase ).to(__lowerCamelCase )
if "use_cache" not in inputs:
__A : str = True
__A : Union[str, Any] = model(**__lowerCamelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__A : Optional[Any] = (
getattr(__lowerCamelCase , '''decoder_layers''' , __lowerCamelCase )
or getattr(__lowerCamelCase , '''num_decoder_layers''' , __lowerCamelCase )
or config.num_hidden_layers
)
__A : Any = getattr(__lowerCamelCase , '''num_kv_heads''' , config.num_attention_heads )
__A : Union[str, Any] = getattr(__lowerCamelCase , '''d_model''' , config.hidden_size )
__A : Dict = embed_dim // num_attention_heads
__A : str = outputs['''past_key_values''']
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
            batch_size , seq_length = inputs['''input_ids'''].shape
for i in range(__lowerCamelCase ):
if config.new_decoder_architecture:
__A : Tuple = config.num_attention_heads
elif config.multi_query:
__A : int = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
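    # A hedged summary of what the loop above asserts: `past_key_values` holds one
    # (key, value) pair per decoder layer, each of shape
    # (batch_size, num_attention_heads, seq_length, per_head_embed_dim); Falcon's
    # multi-query variant collapses the KV head count to 1, while the
    # new_decoder_architecture keeps the full head count.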
@require_torch
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
__A : Union[str, Any] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(__lowerCamelCase )
__A : Any = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCamelCase )
__A : Dict = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
__A : Dict = model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=19 )
__A : Optional[int] = tokenizer.batch_decode(__lowerCamelCase )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__A : str = AutoTokenizer.from_pretrained(__lowerCamelCase )
__A : List[Any] = FalconForCausalLM.from_pretrained(__lowerCamelCase )
model.eval()
model.to(__lowerCamelCase )
__A : Tuple = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCamelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=4 )
model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=4 )
model.generate(**__lowerCamelCase , num_beams=2 , max_new_tokens=4 )
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__A : List[str] = AutoTokenizer.from_pretrained(__lowerCamelCase )
__A : str = FalconForCausalLM.from_pretrained(__lowerCamelCase )
model.eval()
model.to(device=__lowerCamelCase )
__A : List[Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCamelCase )
# Test results are the same with and without cache
__A : str = model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=20 , use_cache=__lowerCamelCase )
__A : Union[str, Any] = model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=20 , use_cache=__lowerCamelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 291 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bloom"""] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
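# A minimal usage sketch (hypothetical call site): with the _LazyModule set up
# above, heavy submodules are imported only on first attribute access, e.g.
#
#   from transformers.models.bloom import BloomConfig       # cheap, config only
#   from transformers.models.bloom import BloomForCausalLM  # triggers the torch import lazily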
| 291 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 338 | import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def SCREAMING_SNAKE_CASE_ ( split_dict ) -> Union[str, Any]:
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='''my_dataset''' )] )
def SCREAMING_SNAKE_CASE_ ( split_info ) -> Optional[int]:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 338 | 1 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self : Union[str, Any] ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
    def _compute( self : str , predictions : Tuple , references : str ) -> Optional[Any]:
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(references )
        return {
            "accuracy": accuracy,
        }
| 207 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
TPosition = tuple[int, int]
class _UpperCAmelCase :
"""simple docstring"""
    def __init__( self : Dict , pos_x : int , pos_y : int , goal_x : int , goal_y : int , g_cost : int , parent : Node | None , ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic( self : Any ) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
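        # Worked example: for dx=3, dy=4 the Manhattan heuristic returns 7 while the
        # Euclidean one returns sqrt(9 + 16) = 5; Manhattan is the tighter admissible
        # bound here since only the four axis-aligned moves in `delta` are allowed.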
def __lt__( self : Union[str, Any] , lowerCAmelCase_ : Node ) -> bool:
return self.f_cost < other.f_cost
class _UpperCAmelCase :
"""simple docstring"""
    def __init__( self : List[str] , start : TPosition , goal : TPosition ) -> Tuple:
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , None )
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search( self : str ) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]
    def get_successors( self : Optional[Any] , parent : Node ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self : Tuple , node : Node | None ) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class _UpperCAmelCase :
"""simple docstring"""
    def __init__( self : int , start : TPosition , goal : TPosition ) -> None:
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search( self : Dict ) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path( self : Dict , fwd_node : Node , bwd_node : Node ) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 207 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol ):
    '''simple docstring'''
    def process( self , sample : float ) -> float:
        return 0.0
def get_bounds( fft_results : np.ndarray , samplerate : int ):
    '''simple docstring'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type : FilterType , samplerate : int ):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size) # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('Frequency (Hz)' )
    plt.xscale('log' )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('Gain (dB)' )
    plt.plot(fft_db )
    plt.show()
def show_phase_response( filter_type : FilterType , samplerate : int ):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size) # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('Frequency (Hz)' )
    plt.xscale('log' )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('Phase shift (Radians)' )
    plt.plot(np.unwrap(fft_phase , -2 * pi ) )
    plt.show()
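# Hedged usage sketch (`IIRFilter` is a hypothetical class implementing the
# FilterType protocol above, i.e. exposing `process(sample) -> float`):
#
#   filt = IIRFilter(order=2)
#   show_frequency_response(filt, 48000)   # gain in dB on a log frequency axis
#   show_phase_response(filt, 48000)       # unwrapped phase in radians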
| 29 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def a__ ( self , _a , _a , _a ) -> int:
_A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
return generator, ["Something to write", "Something else"]
def a__ ( self , _a , _a ) -> Dict:
_A : Any = generator("""Something there""" )
self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
_A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
_A : Optional[int] = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
with self.assertRaises(_a ):
generator(4 )
@require_torch
def a__ ( self ) -> List[str]:
_A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
_A : Dict = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
_A : Any = 3
_A : Any = generator(
"""Something there""" , num_return_sequences=_a , num_beams=_a , )
_A : Optional[int] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a , _a )
_A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
self.assertEqual(
_a , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
_A : Dict = generator.model.config.eos_token_id
_A : List[str] = """<pad>"""
_A : Dict = generator(
["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
self.assertEqual(
_a , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def a__ ( self ) -> int:
_A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
_A : str = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
| 26 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing its layers and
# emb dimensions to the minimum, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
    tokenizer = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
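# Hedged note: the checkpoint stays tiny because both the vocab files (21 BPE
# tokens plus three merges) and the transformer (d_model=4, one encoder and one
# decoder layer) are minimal -- enough to exercise the FSMT code paths in tests
# without carrying real weights.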
| 141 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
A_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 141 | 1 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] , lowerCamelCase_ :str = "cpu" , lowerCamelCase_ :str = "openai/clip-vit-large-patch14" ):
"""simple docstring"""
lowerCamelCase__ : Any =device
lowerCamelCase__ : Dict =CLIPTokenizerFast.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] =[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
lowerCamelCase__ : Dict =[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
lowerCamelCase__ : Dict =torchvision.transforms.Normalize(self.image_mean , self.image_std )
lowerCamelCase__ : str =torchvision.transforms.Resize(224 )
lowerCamelCase__ : Optional[int] =torchvision.transforms.CenterCrop(224 )
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Dict =self.resize(lowerCamelCase_ )
lowerCamelCase__ : Any =self.center_crop(lowerCamelCase_ )
lowerCamelCase__ : Tuple =self.normalize(lowerCamelCase_ )
return images
def __call__( self :Any , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :str=None , **lowerCamelCase_ :List[str] ):
"""simple docstring"""
lowerCamelCase__ : Tuple =self.tokenizer(text=lowerCamelCase_ , **lowerCamelCase_ )
lowerCamelCase__ : str =self.preprocess_img(lowerCamelCase_ )
lowerCamelCase__ : Dict ={key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class A_ ( nn.Module ):
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase_ :Dict=10 , lowerCamelCase_ :Optional[Any]=0.01 , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :int=None , lowerCamelCase_ :Dict=None , lowerCamelCase_ :Dict=None , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :int=False , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :List[Any]="image" , lowerCamelCase_ :str=True , lowerCamelCase_ :List[str]=False , lowerCamelCase_ :str=False , lowerCamelCase_ :Optional[Any]=False , ):
"""simple docstring"""
super().__init__()
lowerCamelCase__ : List[str] =None
lowerCamelCase__ : Tuple =device if device else get_device()
if vqgan:
lowerCamelCase__ : Optional[int] =vqgan
else:
lowerCamelCase__ : Union[str, Any] =load_vqgan(self.device , conf_path=lowerCamelCase_ , ckpt_path=lowerCamelCase_ )
self.vqgan.eval()
if clip:
lowerCamelCase__ : List[str] =clip
else:
lowerCamelCase__ : Dict =CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
lowerCamelCase__ : Optional[Any] =ProcessorGradientFlow(device=self.device )
lowerCamelCase__ : Optional[Any] =iterations
lowerCamelCase__ : Union[str, Any] =lr
lowerCamelCase__ : List[Any] =log
lowerCamelCase__ : Tuple =make_grid
lowerCamelCase__ : int =return_val
lowerCamelCase__ : int =quantize
lowerCamelCase__ : int =self.vqgan.decoder.z_shape
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :int=None , lowerCamelCase_ :str=5 , lowerCamelCase_ :Any=True ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =[]
if output_path is None:
lowerCamelCase__ : Union[str, Any] ='./animation.gif'
if input_path is None:
lowerCamelCase__ : str =self.save_path
lowerCamelCase__ : Tuple =sorted(glob(input_path + '/*' ) )
if not len(lowerCamelCase_ ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(lowerCamelCase_ ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
lowerCamelCase__ : Any =total_duration / len(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =[frame_duration] * len(lowerCamelCase_ )
if extend_frames:
lowerCamelCase__ : Optional[int] =1.5
lowerCamelCase__ : Dict =3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(lowerCamelCase_ ) )
imageio.mimsave(lowerCamelCase_ , lowerCamelCase_ , duration=lowerCamelCase_ )
print(f"""gif saved to {output_path}""" )
    def _get_latent( self :Union[str, Any] , path :str=None , img :Any=None ):
        """simple docstring"""
        if not (path or img):
            raise ValueError('Input either path or tensor' )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ) , target_image_size=256 ).to(self.device )
        img = preprocess_vqgan(img )
        z , *_ = self.vqgan.encode(img )
        return z
    def _add_vector( self :List[Any] , transform_vector :Optional[int] ):
        """simple docstring"""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q , *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
def UpperCAmelCase__ ( self :Any , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any]=None ):
"""simple docstring"""
lowerCamelCase__ : str =self.clip_preprocessor(text=lowerCamelCase_ , images=lowerCamelCase_ , return_tensors='pt' , padding=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] =self.clip(**lowerCamelCase_ )
lowerCamelCase__ : Any =clip_outputs.logits_per_image
if weights is not None:
lowerCamelCase__ : Dict =similarity_logits * weights
return similarity_logits.sum()
def UpperCAmelCase__ ( self :str , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =self._get_clip_similarity(pos_prompts['prompts'] , lowerCamelCase_ , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
lowerCamelCase__ : Dict =self._get_clip_similarity(neg_prompts['prompts'] , lowerCamelCase_ , weights=neg_prompts['weights'] )
else:
lowerCamelCase__ : Union[str, Any] =torch.tensor([1] , device=self.device )
lowerCamelCase__ : Optional[int] =-torch.log(lowerCamelCase_ ) + torch.log(lowerCamelCase_ )
return loss
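        # Hedged reading of the loss above: it equals log(neg_similarity) -
        # log(pos_similarity), so minimizing it pushes the edited image toward the
        # positive prompts and away from the negative ones in CLIP space.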
def UpperCAmelCase__ ( self :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :List[str] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =torch.randn_like(self.latent , requires_grad=lowerCamelCase_ , device=self.device )
lowerCamelCase__ : Dict =torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
lowerCamelCase__ : Dict =self._add_vector(lowerCamelCase_ )
lowerCamelCase__ : str =loop_post_process(lowerCamelCase_ )
lowerCamelCase__ : int =self._get_CLIP_loss(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
print('CLIP loss' , lowerCamelCase_ )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=lowerCamelCase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] ):
"""simple docstring"""
wandb.init(reinit=lowerCamelCase_ , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
lowerCamelCase__ : str =Image.open(lowerCamelCase_ )
lowerCamelCase__ : Any =image.resize((256, 256) )
wandb.log('Original Image' , wandb.Image(lowerCamelCase_ ) )
    def process_prompts( self :str , prompts :List[Any] ):
        """simple docstring"""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split('|' )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt , weight = prompt.split(':' )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device ),
        }
def UpperCAmelCase__ ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Dict=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Optional[Any]=None , ):
"""simple docstring"""
if image_path:
lowerCamelCase__ : Optional[Any] =self._get_latent(lowerCamelCase_ )
else:
lowerCamelCase__ : Dict =torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
assert pos_prompts, "You must provide at least one positive prompt."
lowerCamelCase__ : str =self.process_prompts(lowerCamelCase_ )
lowerCamelCase__ : Tuple =self.process_prompts(lowerCamelCase_ )
if save_final and save_path is None:
lowerCamelCase__ : List[str] =os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(lowerCamelCase_ ):
os.makedirs(lowerCamelCase_ )
else:
lowerCamelCase__ : Dict =save_path + '_' + get_timestamp()
os.makedirs(lowerCamelCase_ )
lowerCamelCase__ : Dict =save_path
lowerCamelCase__ : int =self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(lowerCamelCase_ ) )
lowerCamelCase__ : Any =loop_post_process(lowerCamelCase_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ):
if show_intermediate:
show_pil(lowerCamelCase_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'Image': wandb.Image(lowerCamelCase_ )} )
if show_final:
show_pil(lowerCamelCase_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) ) | 126 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor | 126 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=8 ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowerCAmelCase_ : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
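# Worked example for the helper above: with the default scale_factor=8,
# height=512 gives 512 // 64 = 8 with no remainder, so it returns 8 * 8 = 64;
# height=300 gives 300 // 64 = 4 plus one extra block for the remainder, i.e. 40.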
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=512 , __UpperCamelCase=512 ) -> int:
"""simple docstring"""
lowerCAmelCase_ : Any = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowerCAmelCase_ : Optional[Any] = np.array(pil_image.convert("RGB" ) )
    lowerCAmelCase_ : Dict = arr.astype(np.float32 ) / 1_27.5 - 1
lowerCAmelCase_ : str = np.transpose(lowerCAmelCase__ , [2, 0, 1] )
lowerCAmelCase_ : Optional[Any] = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 )
return image
class __lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , a_ : Dict , a_ : Union[str, Any] , a_ : Dict , ):
super().__init__()
self.register_modules(
unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , movq=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase ( self : Union[str, Any] , a_ : str , a_ : str , a_ : Union[str, Any] ):
# get the original timestep using init_timestep
lowerCAmelCase_ : str = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = max(num_inference_steps - init_timestep , 0 )
lowerCAmelCase_ : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
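    # Hedged note on `strength`: with num_inference_steps=100 and strength=0.3,
    # init_timestep=30 and t_start=70, so only the last 30 scheduler timesteps are
    # re-run on the init image -- smaller strength keeps the output closer to it.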
def lowerCamelCase ( self : Dict , a_ : Union[str, Any] , a_ : List[Any] , a_ : Any , a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] , a_ : List[str]=None ):
if not isinstance(SCREAMING_SNAKE_CASE_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(SCREAMING_SNAKE_CASE_ )}''' )
lowerCAmelCase_ : str = image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowerCAmelCase_ : Optional[int] = image
else:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE_ )
]
lowerCAmelCase_ : Union[str, Any] = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
else:
lowerCAmelCase_ : Any = self.movq.encode(SCREAMING_SNAKE_CASE_ ).latent_dist.sample(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = self.movq.config.scaling_factor * init_latents
lowerCAmelCase_ : List[str] = torch.cat([init_latents] , dim=0 )
lowerCAmelCase_ : Any = init_latents.shape
lowerCAmelCase_ : int = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
# get latents
lowerCAmelCase_ : Optional[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = init_latents
return latents
def lowerCamelCase ( self : Any , a_ : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCAmelCase_ : Any = torch.device(f'''cuda:{gpu_id}''' )
lowerCAmelCase_ : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCamelCase ( self : Tuple , a_ : List[str]=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowerCAmelCase_ : int = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=SCREAMING_SNAKE_CASE_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCAmelCase_ : Any = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCAmelCase_ : Dict = cpu_offload_with_hook(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prev_module_hook=SCREAMING_SNAKE_CASE_ )
# We'll offload the last model manually.
lowerCAmelCase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase ( self : List[Any] ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE_ )
def __call__( self : Optional[int] , a_ : Tuple , a_ : Dict , a_ : Any , a_ : Dict = 5_12 , a_ : List[str] = 5_12 , a_ : Any = 1_00 , a_ : Optional[int] = 4.0 , a_ : str = 0.3 , a_ : Any = 1 , a_ : Tuple = None , a_ : List[str] = "pil" , a_ : str = True , ):
lowerCAmelCase_ : int = self._execution_device
lowerCAmelCase_ : Any = guidance_scale > 1.0
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : Optional[Any] = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
lowerCAmelCase_ : int = image_embeds.shape[0]
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : Dict = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase_ : int = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
lowerCAmelCase_ : Tuple = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
lowerCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : str = [image]
if not all(isinstance(SCREAMING_SNAKE_CASE_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f'''Input is in incorrect format: {[type(SCREAMING_SNAKE_CASE_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' )
lowerCAmelCase_ : Optional[int] = torch.cat([prepare_image(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for i in image] , dim=0 )
lowerCAmelCase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = self.movq.encode(SCREAMING_SNAKE_CASE_ )["""latents"""]
lowerCAmelCase_ : List[Any] = latents.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowerCAmelCase_ : Any = downscale_height_and_width(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.movq_scale_factor )
lowerCAmelCase_ : int = self.prepare_latents(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , image_embeds.dtype , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase_ : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase_ : int = {"""image_embeds""": image_embeds}
lowerCAmelCase_ : Union[str, Any] = self.unet(
sample=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , added_cond_kwargs=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
if do_classifier_free_guidance:
lowerCAmelCase_ : Any = noise_pred.split(latents.shape[1] , dim=1 )
lowerCAmelCase_ : List[str] = noise_pred.chunk(2 )
lowerCAmelCase_ : List[str] = variance_pred.chunk(2 )
lowerCAmelCase_ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCAmelCase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCAmelCase_ : Any = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase_ : Optional[int] = self.scheduler.step(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , )[0]
# post-processing
lowerCAmelCase_ : List[Any] = self.movq.decode(SCREAMING_SNAKE_CASE_ , force_not_quantize=SCREAMING_SNAKE_CASE_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowerCAmelCase_ : Tuple = image * 0.5 + 0.5
lowerCAmelCase_ : Dict = image.clamp(0 , 1 )
lowerCAmelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase_ : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
| 366 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , a_ : List[str] , a_ : Tuple=7 , a_ : Any=3 , a_ : Union[str, Any]=18 , a_ : List[str]=30 , a_ : List[str]=4_00 , a_ : str=True , a_ : Tuple=None , a_ : str=True , a_ : Optional[int]=None , ):
lowerCAmelCase_ : Any = size if size is not None else {"shortest_edge": 20}
lowerCAmelCase_ : Any = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCAmelCase_ : int = parent
lowerCAmelCase_ : Dict = batch_size
lowerCAmelCase_ : Any = num_channels
lowerCAmelCase_ : str = image_size
lowerCAmelCase_ : int = min_resolution
lowerCAmelCase_ : Tuple = max_resolution
lowerCAmelCase_ : str = do_resize
lowerCAmelCase_ : List[Any] = size
lowerCAmelCase_ : Any = do_center_crop
lowerCAmelCase_ : Tuple = crop_size
def lowerCamelCase ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCamelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : int = MobileNetVaImageProcessingTester(self )
@property
def lowerCamelCase ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
self.assertTrue(hasattr(a_ , "do_center_crop" ) )
self.assertTrue(hasattr(a_ , "crop_size" ) )
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowerCamelCase ( self : Tuple ):
pass
def lowerCamelCase ( self : Any ):
# Initialize image_processing
lowerCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
lowerCAmelCase_ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase_ : List[str] = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase ( self : str ):
# Initialize image_processing
lowerCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase_ : Dict = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase ( self : Union[str, Any] ):
# Initialize image_processing
lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
lowerCAmelCase_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase_ : str = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 161 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_ )
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
a__ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
a__ : ClassVar[Features] = Features({} )
a__ : str = "text"
@property
def UpperCamelCase__ ( self) -> Dict[str, str]:
return {self.text_column: "text"}
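# The mangled property above is the template's column mapping: it sends the
# configured text column to the canonical "text" feature, e.g. {"text": "text"}
# with the default text_column.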
| 43 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase__ ( _A , _A , _A ):
if isinstance(_A , torch.Tensor ):
return image
elif isinstance(_A , PIL.Image.Image ):
a : Any = [image]
if isinstance(image[0] , PIL.Image.Image ):
a : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
a : int = np.concatenate(_A , axis=0 )
a : int = np.array(_A ).astype(np.floataa ) / 255.0
a : str = image.transpose(0 , 3 , 1 , 2 )
a : str = 2.0 * image - 1.0
a : Optional[int] = torch.from_numpy(_A )
elif isinstance(image[0] , torch.Tensor ):
a : Optional[Any] = torch.cat(_A , dim=0 )
return image
def lowerCamelCase__ ( _A , _A , _A , _A=0.9995 ):
if not isinstance(_A , np.ndarray ):
a : Dict = True
a : Optional[Any] = va.device
a : Optional[int] = va.cpu().numpy()
a : Union[str, Any] = va.cpu().numpy()
a : Any = np.sum(va * va / (np.linalg.norm(_A ) * np.linalg.norm(_A )) )
if np.abs(_A ) > DOT_THRESHOLD:
a : Any = (1 - t) * va + t * va
else:
a : Any = np.arccos(_A )
a : Tuple = np.sin(_A )
a : Optional[Any] = theta_a * t
a : List[Any] = np.sin(_A )
a : Dict = np.sin(theta_a - theta_t ) / sin_theta_a
a : int = sin_theta_t / sin_theta_a
a : Any = sa * va + sa * va
if inputs_are_torch:
a : Dict = torch.from_numpy(_A ).to(_A )
return va
def lowerCamelCase__ ( _A , _A ):
a : Optional[int] = F.normalize(_A , dim=-1 )
a : str = F.normalize(_A , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowerCamelCase__ ( _A , _A ):
for param in model.parameters():
a : int = value
class a__( lowerCamelCase__ ):
def __init__( self : str , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : List[str]=None , __snake_case : List[str]=None , __snake_case : List[Any]=None , ):
super().__init__()
self.register_modules(
vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , )
a : Optional[Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , __snake_case )
else feature_extractor.size['shortest_edge']
)
a : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __snake_case )
set_requires_grad(self.clip_model , __snake_case )
def lowercase_ ( self : int , __snake_case : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__snake_case )
def lowercase_ ( self : Union[str, Any] ):
self.enable_attention_slicing(__snake_case )
def lowercase_ ( self : Optional[Any] ):
set_requires_grad(self.vae , __snake_case )
def lowercase_ ( self : Tuple ):
set_requires_grad(self.vae , __snake_case )
def lowercase_ ( self : int ):
set_requires_grad(self.unet , __snake_case )
def lowercase_ ( self : Union[str, Any] ):
set_requires_grad(self.unet , __snake_case )
def lowercase_ ( self : int , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] ):
# get the original timestep using init_timestep
a : Optional[Any] = min(int(num_inference_steps * strength ) , __snake_case )
a : Union[str, Any] = max(num_inference_steps - init_timestep , 0 )
a : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
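# e.g. num_inference_steps = 50 and strength = 0.6 give init_timestep = 30 and
# t_start = 20, so denoising runs over the last 30 of the 50 scheduled steps;
# a higher strength keeps more noise steps and departs further from the input.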
def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any]=None ):
if not isinstance(__snake_case , torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(__snake_case )}""" )
a : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case )
if isinstance(__snake_case , __snake_case ):
a : Optional[int] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case )
]
a : Optional[Any] = torch.cat(__snake_case , dim=0 )
else:
a : Union[str, Any] = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a : List[str] = 0.18215 * init_latents
a : str = init_latents.repeat_interleave(__snake_case , dim=0 )
a : Dict = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case )
# get latents
a : Dict = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case )
a : int = init_latents
return latents
def lowercase_ ( self : List[str] , __snake_case : Dict ):
a : List[Any] = self.coca_transform(__snake_case ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
a : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
a : Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def lowercase_ ( self : Tuple , __snake_case : Any , __snake_case : Optional[Any] ):
a : List[Any] = self.feature_extractor.preprocess(__snake_case )
a : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
a : int = self.clip_model.get_image_features(__snake_case )
a : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )
a : Tuple = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowercase_ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] , ):
a : Optional[Any] = latents.detach().requires_grad_()
a : List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
a : Any = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
a : int = self.scheduler.alphas_cumprod[timestep]
a : Any = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
a : Tuple = torch.sqrt(__snake_case )
a : str = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __snake_case ):
a : List[Any] = self.scheduler.sigmas[index]
a : Optional[int] = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a : Union[str, Any] = 1 / 0.18215 * sample
a : str = self.vae.decode(__snake_case ).sample
a : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
a : Tuple = transforms.Resize(self.feature_extractor_size )(__snake_case )
a : List[str] = self.normalize(__snake_case ).to(latents.dtype )
a : List[str] = self.clip_model.get_image_features(__snake_case )
a : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )
a : int = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale
a : List[str] = -torch.autograd.grad(__snake_case , __snake_case )[0]
if isinstance(self.scheduler , __snake_case ):
a : List[Any] = latents.detach() + grads * (sigma**2)
a : Optional[int] = noise_pred_original
else:
a : List[Any] = noise_pred_original - torch.sqrt(__snake_case ) * grads
return noise_pred, latents
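# In short, cond_fn above: form a one-step estimate of the clean sample, decode
# and CLIP-embed it, take the gradient of the scaled spherical distance to the
# target embedding with respect to the latents, then steer denoising with it:
# either subtract sqrt(1 - alpha_bar_t) * grads from the noise prediction or,
# for sigma-based schedulers, shift the latents by grads * sigma**2.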
@torch.no_grad()
def __call__( self : Optional[int] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 5_12 , __snake_case : Optional[int] = 5_12 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 1_00 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ):
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(__snake_case , torch.Generator ) and batch_size > 1:
a : Dict = [generator] + [None] * (batch_size - 1)
a : Any = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
a : List[str] = [x[0] for x in coca_is_none if x[1]]
a : List[str] = ', '.join(__snake_case )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__snake_case ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a : int = self.get_image_description(__snake_case )
if style_prompt is None:
if len(__snake_case ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a : Union[str, Any] = self.get_image_description(__snake_case )
# get prompt text embeddings for content and style
a : Optional[Any] = self.tokenizer(
__snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
a : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
a : Dict = self.tokenizer(
__snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
a : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
a : Any = slerp(__snake_case , __snake_case , __snake_case )
# duplicate text embeddings for each generation per prompt
a : Optional[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 )
# set timesteps
a : int = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
a : Any = {}
if accepts_offset:
a : Optional[Any] = 1
self.scheduler.set_timesteps(__snake_case , **__snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps = self.scheduler.timesteps.to(self.device )
a , a : Tuple = self.get_timesteps(__snake_case , __snake_case , self.device )
a : Optional[int] = timesteps[:1].repeat(__snake_case )
# Preprocess image
a : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case )
a : List[Any] = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
a : str = preprocess(__snake_case , __snake_case , __snake_case )
a : Union[str, Any] = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
a : Union[str, Any] = slerp(__snake_case , __snake_case , __snake_case )
if clip_guidance_scale > 0:
a : Dict = self.get_clip_image_embeddings(__snake_case , __snake_case )
a : int = self.get_clip_image_embeddings(__snake_case , __snake_case )
a : List[str] = slerp(
__snake_case , __snake_case , __snake_case )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a : Any = content_text_input.input_ids.shape[-1]
a : List[Any] = self.tokenizer([''] , padding='max_length' , max_length=__snake_case , return_tensors='pt' )
a : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
a : Dict = uncond_embeddings.repeat_interleave(__snake_case , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a : Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated on the target device
# for 1-to-1 reproducibility of results with the CompVis implementation.
# However this currently doesn't work in `mps`.
a : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
a : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
a : int = torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to(
self.device )
else:
a : Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
a : List[str] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a : Union[str, Any] = {}
if accepts_eta:
a : List[str] = eta
# check if the scheduler accepts generator
a : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
a : Any = generator
with self.progress_bar(total=__snake_case ):
for i, t in enumerate(__snake_case ):
# expand the latents if we are doing classifier free guidance
a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a : Dict = self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
a : List[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
a , a : List[str] = noise_pred.chunk(2 )
a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
a : Optional[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
a , a : Union[str, Any] = self.cond_fn(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# compute the previous noisy sample x_t -> x_t-1
a : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a : Tuple = 1 / 0.18215 * latents
a : Optional[int] = self.vae.decode(__snake_case ).sample
a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
a : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a : str = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
| 297 | 0 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class _snake_case ( snake_case_ ):
_lowercase : Union[str, Any] = ComputeEnvironment.AMAZON_SAGEMAKER
_lowercase : List[Any] = True
_lowercase : Dict = "ml.p3.2xlarge"
_lowercase : Dict = "accelerate_sagemaker_execution_role"
_lowercase : Union[str, Any] = "hf-sm"
_lowercase : str = "us-east-1"
_lowercase : Optional[Any] = 1
_lowercase : Any = "accelerate-sagemaker-1"
_lowercase : str = "1.6"
_lowercase : List[str] = "4.4"
_lowercase : Optional[Any] = "train.py"
_lowercase : Any = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
_lowercase : List[str] = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['model_name_or_path'] , _A)
assert isinstance(converted_args['do_train'] , _A)
assert isinstance(converted_args['epochs'] , _A)
assert isinstance(converted_args['learning_rate'] , _A)
assert isinstance(converted_args['max_steps'] , _A)
with pytest.raises(_A):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 350 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape
SCREAMING_SNAKE_CASE = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
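# Weight tying: the new bias-free Linear layer's weight data is set to the
# embedding table (shape (vocab_size, emb_size)), so it maps hidden states to
# vocabulary logits while sharing parameters with the input embeddings.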
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = torch.load(_UpperCAmelCase , map_location='cpu')
SCREAMING_SNAKE_CASE = mam_aaa['args'] or mam_aaa['cfg']['model']
SCREAMING_SNAKE_CASE = mam_aaa['model']
remove_ignore_keys_(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = state_dict['encoder.embed_tokens.weight'].shape[0]
SCREAMING_SNAKE_CASE = MaMaaaConfig(
vocab_size=_UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
SCREAMING_SNAKE_CASE = state_dict['decoder.embed_tokens.weight']
SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(_UpperCAmelCase)
model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared)
return model
if __name__ == "__main__":
a_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
a_ : List[str] = parser.parse_args()
a_ : Dict = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 327 | 0 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A (datasets.BuilderConfig):
'''simple docstring'''
__lowercase: Optional[datasets.Features] = None
__lowercase: str = "utf-8"
__lowercase: Optional[str] = None
__lowercase: Optional[str] = None
__lowercase: bool = True # deprecated
__lowercase: Optional[int] = None # deprecated
__lowercase: int = 10 << 20 # 10MB
__lowercase: Optional[bool] = None
class __A (datasets.ArrowBasedBuilder):
'''simple docstring'''
__lowercase: int = JsonConfig
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
snake_case_ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Any ) ->Optional[Any]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
snake_case_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_A , (str, list, tuple) ):
snake_case_ = data_files
if isinstance(_A , _A ):
snake_case_ = [files]
snake_case_ = [dl_manager.iter_files(_A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
snake_case_ = []
for split_name, files in data_files.items():
if isinstance(_A , _A ):
snake_case_ = [files]
snake_case_ = [dl_manager.iter_files(_A ) for file in files]
splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={"""files""": files} ) )
return splits
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : pa.Table ) ->pa.Table:
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
snake_case_ = self.config.features.arrow_schema.field(_A ).type
snake_case_ = pa_table.append_column(_A , pa.array([None] * len(_A ) , type=_A ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case_ = table_cast(_A , self.config.features.arrow_schema )
return pa_table
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Any ) ->Union[str, Any]:
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case_ = json.load(_A )
# We keep only the field we are interested in
snake_case_ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_A , (list, tuple) ):
snake_case_ = set().union(*[row.keys() for row in dataset] )
snake_case_ = {col: [row.get(_A ) for row in dataset] for col in keys}
else:
snake_case_ = dataset
snake_case_ = pa.Table.from_pydict(_A )
yield file_idx, self._cast_table(_A )
# If the file has one json object per line
else:
with open(_A , """rb""" ) as f:
snake_case_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
snake_case_ = max(self.config.chunksize // 32 , 16 << 10 )
snake_case_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
snake_case_ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_A )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
snake_case_ = batch.decode(self.config.encoding , errors=_A ).encode("""utf-8""" )
try:
while True:
try:
snake_case_ = paj.read_json(
io.BytesIO(_A ) , read_options=paj.ReadOptions(block_size=_A ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_A , pa.ArrowInvalid )
and "straddling" not in str(_A )
or block_size > len(_A )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(_A )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_A , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case_ = json.load(_A )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(_A )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_A , _A ): # list is the only sequence type supported in JSON
try:
snake_case_ = set().union(*[row.keys() for row in dataset] )
snake_case_ = {col: [row.get(_A ) for row in dataset] for col in keys}
snake_case_ = pa.Table.from_pydict(_A )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(_A )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(_A )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(_A )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_A )
batch_idx += 1
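# Illustrative, self-contained sketch (not the loader's API) of the chunked
# parsing strategy above, reusing this module's io/pyarrow imports: read fixed
# size chunks, finish the current line, and double the Arrow block size
# whenever a JSON object straddles a block boundary. Assumes JSON Lines input
# with a consistent schema across chunks; the function name is hypothetical.
def _read_json_lines_sketch(path, chunksize=10 << 20):
    tables = []
    block_size = max(chunksize // 32, 16 << 10)  # 16kB floor, as above
    with open(path, "rb") as f:
        while True:
            batch = f.read(chunksize)
            if not batch:
                break
            batch += f.readline()  # finish the current line
            while True:
                try:
                    tables.append(
                        paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                    )
                    break
                except pa.ArrowInvalid as e:
                    if "straddling" not in str(e) or block_size > len(batch):
                        raise
                    block_size *= 2  # an object straddled a block: retry with a bigger block
    return pa.concat_tables(tables) if tables else pa.table({})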
| 347 |
"""simple docstring"""
def __UpperCAmelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
while a != 0:
__snake_case , __snake_case : Optional[Any] = b % a, a
return b
def __UpperCAmelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
if gcd(UpperCAmelCase_ , UpperCAmelCase_ ) != 1:
__snake_case : Optional[Any] = F"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(UpperCAmelCase_ )
__snake_case , __snake_case , __snake_case : Optional[int] = 1, 0, a
__snake_case , __snake_case , __snake_case : int = 0, 1, m
while va != 0:
__snake_case : Union[str, Any] = ua // va
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
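# Illustrative, self-contained restatement of the two mangled helpers above:
# Euclid's gcd and the modular inverse via the iterative extended Euclidean
# algorithm. The names here are hypothetical.
def _gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b

def _mod_inverse(a: int, m: int) -> int:
    if _gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m

# e.g. _mod_inverse(3, 11) == 4, since (3 * 4) % 11 == 1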
| 172 | 0 |
from __future__ import annotations
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : Tuple = len(__a )
# We need to create solution object to save path.
__magic_name__ : Dict = [[0 for _ in range(__a )] for _ in range(__a )]
__magic_name__ : Any = run_maze(__a, 0, 0, __a )
if solved:
print("""\n""".join(str(__a ) for row in solutions ) )
else:
print("""No solution exists!""" )
return solved
def UpperCamelCase ( _A, _A, _A, _A ):
"""simple docstring"""
__magic_name__ : int = len(__a )
# Final check point.
if i == j == (size - 1):
__magic_name__ : Tuple = 1
return True
__magic_name__ : Any = (not i < 0) and (not j < 0) # Check lower bounds
__magic_name__ : Any = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
__magic_name__ : Tuple = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
__magic_name__ : str = 1
# check for directions
if (
run_maze(__a, i + 1, __a, __a )
or run_maze(__a, __a, j + 1, __a )
or run_maze(__a, i - 1, __a, __a )
or run_maze(__a, __a, j - 1, __a )
):
return True
__magic_name__ : Optional[Any] = 0
return False
return False
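# Illustrative, self-contained sketch (hypothetical names) of the backtracking
# idea above; the original helpers are name-mangled and call each other by
# their pre-mangling names. Depth-first search over the four moves, marking a
# cell on the way in and un-marking it on backtrack.
def _solve_maze_sketch(maze: list[list[int]]) -> list[list[int]] | None:
    n = len(maze)
    path = [[0] * n for _ in range(n)]

    def dfs(i: int, j: int) -> bool:
        if i == j == n - 1:  # reached the bottom-right goal cell
            path[i][j] = 1
            return True
        if 0 <= i < n and 0 <= j < n and not path[i][j] and not maze[i][j]:
            path[i][j] = 1  # mark as part of the tentative path
            if dfs(i + 1, j) or dfs(i, j + 1) or dfs(i - 1, j) or dfs(i, j - 1):
                return True
            path[i][j] = 0  # dead end: backtrack
        return False

    return path if dfs(0, 0) else None

# e.g. _solve_maze_sketch([[0, 1], [0, 0]]) == [[1, 0], [1, 1]]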
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : str = StableUnCLIPImgaImgPipeline
lowercase__ : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase__ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ : Union[str, Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ : Union[str, Any] = frozenset([] )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Any = 32
__magic_name__ : Union[str, Any] = embedder_hidden_size
# image encoding components
__magic_name__ : Optional[int] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__magic_name__ : Optional[Any] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCAmelCase__ , projection_dim=lowerCAmelCase__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__magic_name__ : Any = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase__ )
__magic_name__ : int = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
__magic_name__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__magic_name__ : Optional[Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__magic_name__ : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase__ , layers_per_block=1 , upcast_attention=lowerCAmelCase__ , use_linear_projection=lowerCAmelCase__ , )
torch.manual_seed(0 )
__magic_name__ : Optional[Any] = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , )
torch.manual_seed(0 )
__magic_name__ : List[str] = AutoencoderKL()
__magic_name__ : List[str] = {
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 , lowerCAmelCase__=True ) -> List[Any]:
if str(lowerCAmelCase__ ).startswith("""mps""" ):
__magic_name__ : Optional[int] = torch.manual_seed(lowerCAmelCase__ )
else:
__magic_name__ : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__magic_name__ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if pil_image:
__magic_name__ : Optional[Any] = input_image * 0.5 + 0.5
__magic_name__ : int = input_image.clamp(0 , 1 )
__magic_name__ : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__magic_name__ : Optional[int] = DiffusionPipeline.numpy_to_pil(lowerCAmelCase__ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : List[str] = self.get_dummy_components()
__magic_name__ : int = StableUnCLIPImgaImgPipeline(**lowerCAmelCase__ )
__magic_name__ : List[str] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Any = self.get_dummy_inputs(lowerCAmelCase__ )
inputs.update({"""image_embeds""": None} )
__magic_name__ : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__magic_name__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ : int = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __magic_name__ ( self ) -> Dict:
__magic_name__ : int = torch_device in ["""cpu""", """mps"""]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Tuple = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase__ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCAmelCase__ )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
__magic_name__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
__magic_name__ : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__magic_name__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__ : Union[str, Any] = pipe(lowerCAmelCase__ , """anime turle""" , generator=lowerCAmelCase__ , output_type="""np""" )
__magic_name__ : Optional[Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
__magic_name__ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
__magic_name__ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__magic_name__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__ : Any = pipe(lowerCAmelCase__ , """anime turle""" , generator=lowerCAmelCase__ , output_type="""np""" )
__magic_name__ : Any = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
__magic_name__ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
__magic_name__ : int = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__magic_name__ : List[Any] = pipe(
lowerCAmelCase__ , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
__magic_name__ : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 138 | 0 |
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int ) -> bool:
"""simple docstring"""
return numa ^ numa < 0
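# Clean restatement of the trick above (the original mangles both parameter
# names into the same identifier): two ints have opposite signs iff the sign
# bit of their XOR is set, i.e. iff the XOR is negative.
def _different_signs(num1: int, num2: int) -> bool:
    return (num1 ^ num2) < 0

# e.g. _different_signs(3, -7) is True; _different_signs(-3, -7) is False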
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCAmelCase = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 119 | 0 |
def __lowerCamelCase ( __a :int ) -> int:
"""simple docstring"""
if not isinstance(__a , __a ):
A__ = F'Input value of [number={number}] must be an integer'
raise TypeError(__a )
if number < 1:
A__ = F'Input value of [number={number}] must be > 0'
raise ValueError(__a )
A__ = 1
for i in range(1 , __a ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
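# Cross-check for the recurrence above, C(n) = C(n - 1) * (4n - 2) // (n + 1)
# (the division is exact in integers): with this function's 1-based input,
# 1..5 yield 1, 1, 2, 5, 14, matching the closed form C_n = comb(2n, n) // (n + 1)
# for n = 0..4.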
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 |
def __lowerCamelCase ( __a :float , __a :list[float] ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError("""Discount rate cannot be negative""" )
if not cash_flows:
raise ValueError("""Cash flows list cannot be empty""" )
A__ = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(__a ) )
return round(__a , ndigits=2 )
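# Worked example (hypothetical cash flows): a 10% discount rate on
# [-100, 60, 70] gives -100 / 1.1**0 + 60 / 1.1**1 + 70 / 1.1**2 = 12.4
# (rounded to 2 digits), i.e. the project clears its cost of capital.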
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SCREAMING_SNAKE_CASE :Union[str, Any] = get_tests_dir('fixtures')
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : List[str] ):
# A mock response for an HTTP head request to emulate server down
__A = mock.Mock()
__A = 5_00
__A = {}
__A = HTTPError
__A = {}
# Download this model to make sure it's in the cache.
__A = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" ,return_value=A ) as mock_head:
__A = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self : Optional[int] ):
# This test is for deprecated behavior and can be removed in v5
__A = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls : List[str] ):
__A = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] ):
try:
delete_repo(token=cls._token ,repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def UpperCamelCase_ ( self : Dict ):
__A = WavaVecaFeatureExtractor.from_pretrained(A )
feature_extractor.push_to_hub("test-feature-extractor" ,use_auth_token=self._token )
__A = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A ,getattr(A ,A ) )
# Reset repo
delete_repo(token=self._token ,repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A ,repo_id="test-feature-extractor" ,push_to_hub=A ,use_auth_token=self._token )
__A = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A ,getattr(A ,A ) )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = WavaVecaFeatureExtractor.from_pretrained(A )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" ,use_auth_token=self._token )
__A = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A ,getattr(A ,A ) )
# Reset repo
delete_repo(token=self._token ,repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A ,repo_id="valid_org/test-feature-extractor-org" ,push_to_hub=A ,use_auth_token=self._token )
__A = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A ,getattr(A ,A ) )
def UpperCamelCase_ ( self : Tuple ):
CustomFeatureExtractor.register_for_auto_class()
__A = CustomFeatureExtractor.from_pretrained(A )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map ,{"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} ,)
__A = AutoFeatureExtractor.from_pretrained(
f'''{USER}/test-dynamic-feature-extractor''' ,trust_remote_code=A )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ ,"CustomFeatureExtractor" )
| 15 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'}
SCREAMING_SNAKE_CASE :Tuple = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
SCREAMING_SNAKE_CASE :List[Any] = {
'camembert-base': 512,
}
SCREAMING_SNAKE_CASE :List[str] = '▁'
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] ,A : List[str] ,A : List[Any]="<s>" ,A : Tuple="</s>" ,A : Any="</s>" ,A : Optional[Any]="<s>" ,A : Tuple="<unk>" ,A : str="<pad>" ,A : int="<mask>" ,A : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] ,A : Optional[Dict[str, Any]] = None ,**A : Optional[Any] ,):
# Mask token behave like a normal word, i.e. include the space before it
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token
__A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,additional_special_tokens=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
__A = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>)
__A = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
__A = len(self.fairseq_tokens_to_ids )
__A = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
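# SentencePiece piece ids are shifted by self.fairseq_offset (= 4, the number
# of fairseq specials above) so that "<s>NOTUSED", "<pad>", "</s>NOTUSED" and
# "<unk>" keep ids 0..3; the id-to-token converter below subtracts the same offset.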
def UpperCamelCase_ ( self : int ,A : List[int] ,A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A = [self.cls_token_id]
__A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def UpperCamelCase_ ( self : Union[str, Any] ,A : List[int] ,A : Optional[List[int]] = None ):
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self : Dict ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCamelCase_ ( self : int ):
__A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self : Any ,A : str ):
return self.sp_model.encode(A ,out_type=A )
def UpperCamelCase_ ( self : List[str] ,A : Dict ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(A ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(A )
def UpperCamelCase_ ( self : Dict ,A : Tuple ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self : Optional[Any] ,A : Dict ):
__A = []
__A = ""
__A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
__A = True
__A = []
else:
current_sub_tokens.append(A )
__A = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def __getstate__( self : Dict ):
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : Union[str, Any] ,A : Any ):
__A = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self : Any ,A : str ,A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__A = os.path.join(
A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A )
elif not os.path.isfile(self.vocab_file ):
with open(A ,"wb" ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 15 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 357 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = StableDiffusionPanoramaPipeline
lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase (self ) -> List[Any]:
torch.manual_seed(0 )
_snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_snake_case = DDIMScheduler()
torch.manual_seed(0 )
_snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_snake_case = CLIPTextModel(UpperCAmelCase )
_snake_case = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase (self , UpperCAmelCase , UpperCAmelCase=0 ) -> Tuple:
_snake_case = torch.manual_seed(UpperCAmelCase )
_snake_case = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase (self ) -> Tuple:
_snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
_snake_case = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = self.get_dummy_inputs(UpperCAmelCase )
_snake_case = sd_pipe(**UpperCAmelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase (self ) -> Tuple:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase (self ) -> Any:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def lowercase (self ) -> Any:
_snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
_snake_case = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = self.get_dummy_inputs(UpperCAmelCase )
_snake_case = """french fries"""
_snake_case = sd_pipe(**UpperCAmelCase , negative_prompt=UpperCAmelCase )
_snake_case = output.images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase (self ) -> str:
_snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
_snake_case = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = self.get_dummy_inputs(UpperCAmelCase )
_snake_case = sd_pipe(**UpperCAmelCase , view_batch_size=2 )
_snake_case = output.images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase (self ) -> Tuple:
_snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
_snake_case = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
_snake_case = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = self.get_dummy_inputs(UpperCAmelCase )
_snake_case = sd_pipe(**UpperCAmelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase (self ) -> str:
_snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=UpperCAmelCase )
_snake_case = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
_snake_case = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = self.get_dummy_inputs(UpperCAmelCase )
_snake_case = sd_pipe(**UpperCAmelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase (self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase (self , UpperCAmelCase=0 ) -> List[str]:
_snake_case = torch.manual_seed(UpperCAmelCase )
_snake_case = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowercase (self ) -> List[Any]:
_snake_case = """stabilityai/stable-diffusion-2-base"""
_snake_case = DDIMScheduler.from_pretrained(UpperCAmelCase , subfolder="""scheduler""" )
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
_snake_case = pipe(**UpperCAmelCase ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_snake_case = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def lowercase (self ) -> str:
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=UpperCAmelCase )
_snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
_snake_case = pipe(**UpperCAmelCase ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_snake_case = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase (self ) -> Optional[int]:
_snake_case = 0
def callback_fn(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> None:
_snake_case = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_snake_case = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_snake_case = latents[0, -3:, -3:, -1]
_snake_case = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_snake_case = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_snake_case = latents[0, -3:, -3:, -1]
_snake_case = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_snake_case = False
_snake_case = """stabilityai/stable-diffusion-2-base"""
_snake_case = DDIMScheduler.from_pretrained(UpperCAmelCase , subfolder="""scheduler""" )
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase )
_snake_case = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
pipe(**UpperCAmelCase , callback=UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase (self ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_snake_case = """stabilityai/stable-diffusion-2-base"""
_snake_case = DDIMScheduler.from_pretrained(UpperCAmelCase , subfolder="""scheduler""" )
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase )
_snake_case = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_snake_case = self.get_inputs()
_snake_case = pipe(**UpperCAmelCase )
_snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9 | 270 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase = ["""image_processor""", """tokenizer"""]
UpperCAmelCase = """BlipImageProcessor"""
UpperCAmelCase = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self ,a_ ,a_ ) -> List[str]:
_UpperCAmelCase : Any = False
super().__init__(UpperCamelCase__ ,UpperCamelCase__ )
_UpperCAmelCase : Any = self.image_processor
def __call__( self ,a_ = None ,a_ = None ,a_ = True ,a_ = False ,a_ = None ,a_ = None ,a_ = 0 ,a_ = None ,a_ = None ,a_ = False ,a_ = False ,a_ = False ,a_ = False ,a_ = False ,a_ = True ,a_ = None ,**a_ ,) -> List[Any]:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
_UpperCAmelCase : List[str] = self.tokenizer
_UpperCAmelCase : List[str] = self.tokenizer(
text=UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ ,padding=UpperCamelCase__ ,truncation=UpperCamelCase__ ,max_length=UpperCamelCase__ ,stride=UpperCamelCase__ ,pad_to_multiple_of=UpperCamelCase__ ,return_attention_mask=UpperCamelCase__ ,return_overflowing_tokens=UpperCamelCase__ ,return_special_tokens_mask=UpperCamelCase__ ,return_offsets_mapping=UpperCamelCase__ ,return_token_type_ids=UpperCamelCase__ ,return_length=UpperCamelCase__ ,verbose=UpperCamelCase__ ,return_tensors=UpperCamelCase__ ,**UpperCamelCase__ ,)
return text_encoding
# add pixel_values
_UpperCAmelCase : Any = self.image_processor(UpperCamelCase__ ,return_tensors=UpperCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ ,padding=UpperCamelCase__ ,truncation=UpperCamelCase__ ,max_length=UpperCamelCase__ ,stride=UpperCamelCase__ ,pad_to_multiple_of=UpperCamelCase__ ,return_attention_mask=UpperCamelCase__ ,return_overflowing_tokens=UpperCamelCase__ ,return_special_tokens_mask=UpperCamelCase__ ,return_offsets_mapping=UpperCamelCase__ ,return_token_type_ids=UpperCamelCase__ ,return_length=UpperCamelCase__ ,verbose=UpperCamelCase__ ,return_tensors=UpperCamelCase__ ,**UpperCamelCase__ ,)
else:
_UpperCAmelCase : Any = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase__ )
return encoding_image_processor
def _snake_case ( self ,*a_ ,**a_ ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*UpperCamelCase__ ,**UpperCamelCase__ )
def _snake_case ( self ,*a_ ,**a_ ) -> int:
return self.tokenizer.decode(*UpperCamelCase__ ,**UpperCamelCase__ )
@property
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : List[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 215 | import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def A ( _lowercase , _lowercase , _lowercase ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , _lowercase )
SCREAMING_SNAKE_CASE : Tuple = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Any = is_small_dataset(_lowercase )
assert result == expected
| 182 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {"vocab_file": "sentencepiece.bpe.model"}
SCREAMING_SNAKE_CASE__ : List[Any] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
SCREAMING_SNAKE_CASE__ : int = {
"camembert-base": 512,
}
SCREAMING_SNAKE_CASE__ : Any = "▁"
class lowerCAmelCase__ ( __lowercase ):
a__ : Dict = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : List[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int]="<s>" , SCREAMING_SNAKE_CASE__ : Dict="</s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : str="<pad>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<mask>" , SCREAMING_SNAKE_CASE__ : List[str]=["<s>NOTUSED", "</s>NOTUSED"] , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
__lowerCamelCase = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
__lowerCamelCase = len(self.fairseq_tokens_to_ids )
__lowerCamelCase = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __A ( self : Tuple ) -> Union[str, Any]:
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __A ( self : int ) -> Tuple:
__lowerCamelCase = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any:
__lowerCamelCase = []
__lowerCamelCase = ''''''
__lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
__lowerCamelCase = True
__lowerCamelCase = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def __getstate__( self : int ) -> int:
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowerCamelCase = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 339 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase__ :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]:
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = encoder_seq_length
__lowerCamelCase = decoder_seq_length
# For common tests
__lowerCamelCase = self.decoder_seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = d_ff
__lowerCamelCase = relative_attention_num_buckets
__lowerCamelCase = dropout_rate
__lowerCamelCase = initializer_factor
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = decoder_start_token_id
__lowerCamelCase = None
__lowerCamelCase = decoder_layers
def __A ( self : Any ) -> Tuple:
return TaConfig.from_pretrained('''google/umt5-base''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]:
if attention_mask is None:
__lowerCamelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__lowerCamelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
if decoder_head_mask is None:
__lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
if cross_attn_head_mask is None:
__lowerCamelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __A ( self : List[Any] ) -> Tuple:
__lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
__lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 )
__lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
__lowerCamelCase = self.get_config()
__lowerCamelCase = config.num_attention_heads
__lowerCamelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, input_dict
def __A ( self : Tuple ) -> List[str]:
__lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def __A ( self : Optional[Any] ) -> Any:
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __A ( self : List[Any] ) -> Any:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int:
__lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCamelCase = model(
input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = result.last_hidden_state
__lowerCamelCase = result.past_key_values
__lowerCamelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict:
__lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval()
# first forward pass
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 )
__lowerCamelCase , __lowerCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state''']
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state''']
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]:
__lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval()
__lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() )
@require_torch
class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
a__ : List[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a__ : Tuple = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a__ : int = True
a__ : int = False
a__ : Tuple = False
a__ : Optional[int] = True
a__ : Optional[int] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a__ : Tuple = [0.8, 0.9]
def __A ( self : Tuple ) -> Tuple:
__lowerCamelCase = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def __A ( self : List[str] ) -> Union[str, Any]:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __A ( self : Union[str, Any] ) -> Any:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> Any:
__lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = config_and_inputs[0]
__lowerCamelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval()
model.to(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ),
}
for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ):
__lowerCamelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__lowerCamelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def __A ( self : Tuple ) -> Optional[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def __A ( self : int ) -> Optional[Any]:
__lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
__lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ ).input_ids
# fmt: off
__lowerCamelCase = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
__lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 339 | 1 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _A ( lowercase__ ):
lowercase__ = VideoMAEConfig()
set_architecture_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if "finetuned" not in model_name:
lowercase__ = False
if "finetuned" in model_name:
lowercase__ = 'huggingface/label-files'
if "kinetics" in model_name:
lowercase__ = 400
lowercase__ = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
lowercase__ = 174
lowercase__ = 'something-something-v2-id2label.json'
else:
raise ValueError("""Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.""" )
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
return config
def _A ( lowercase__ , lowercase__ ):
if "small" in model_name:
lowercase__ = 384
lowercase__ = 1536
lowercase__ = 12
lowercase__ = 16
lowercase__ = 12
lowercase__ = 3
lowercase__ = 192
lowercase__ = 768
elif "large" in model_name:
lowercase__ = 1024
lowercase__ = 4096
lowercase__ = 24
lowercase__ = 16
lowercase__ = 12
lowercase__ = 8
lowercase__ = 512
lowercase__ = 2048
elif "huge" in model_name:
lowercase__ = 1280
lowercase__ = 5120
lowercase__ = 32
lowercase__ = 16
lowercase__ = 12
lowercase__ = 8
lowercase__ = 640
lowercase__ = 2560
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def _A ( lowercase__ ):
if "encoder." in name:
lowercase__ = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
lowercase__ = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
lowercase__ = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
lowercase__ = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowercase__ = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase__ = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
lowercase__ = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
lowercase__ = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
lowercase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
lowercase__ = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
lowercase__ = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
lowercase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
lowercase__ = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
lowercase__ = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
lowercase__ = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowercase__ = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowercase__ = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
lowercase__ = name.replace("""head""" , """classifier""" )
return name
def _A ( lowercase__ , lowercase__ ):
for key in orig_state_dict.copy().keys():
lowercase__ = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if key.startswith("""encoder.""" ):
lowercase__ = key.replace("""encoder.""" , """""" )
if "qkv" in key:
lowercase__ = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
lowercase__ = config.decoder_hidden_size
lowercase__ = int(key_split[2] )
lowercase__ = 'decoder.decoder_layers.'
if "weight" in key:
lowercase__ = val[:dim, :]
lowercase__ = val[dim : dim * 2, :]
lowercase__ = val[-dim:, :]
else:
lowercase__ = config.hidden_size
lowercase__ = int(key_split[1] )
lowercase__ = 'videomae.encoder.layer.'
if "weight" in key:
lowercase__ = val[:dim, :]
lowercase__ = val[dim : dim * 2, :]
lowercase__ = val[-dim:, :]
else:
lowercase__ = val
return orig_state_dict
def _A ( ):
lowercase__ = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase__ = np.load(_SCREAMING_SNAKE_CASE )
return list(_SCREAMING_SNAKE_CASE )
def _A ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
lowercase__ = get_videomae_config(_SCREAMING_SNAKE_CASE )
if "finetuned" in model_name:
lowercase__ = VideoMAEForVideoClassification(_SCREAMING_SNAKE_CASE )
else:
lowercase__ = VideoMAEForPreTraining(_SCREAMING_SNAKE_CASE )
# download original checkpoint, hosted on Google Drive
lowercase__ = 'pytorch_model.bin'
gdown.cached_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , quiet=_SCREAMING_SNAKE_CASE )
lowercase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
if "model" in files:
lowercase__ = files['model']
else:
lowercase__ = files['module']
lowercase__ = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
# verify model on basic input
lowercase__ = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
lowercase__ = prepare_video()
lowercase__ = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
if "finetuned" not in model_name:
lowercase__ = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
lowercase__ = torch.load(_SCREAMING_SNAKE_CASE )
lowercase__ = model(**_SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
lowercase__ = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowercase__ = torch.Size([1, 400] )
lowercase__ = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
lowercase__ = torch.Size([1, 174] )
lowercase__ = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
lowercase__ = torch.Size([1, 1408, 1536] )
lowercase__ = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
lowercase__ = torch.Size([1, 1408, 1536] )
lowercase__ = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowercase__ = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
lowercase__ = torch.Size([1, 1408, 1536] )
lowercase__ = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowercase__ = torch.Size([1, 400] )
lowercase__ = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowercase__ = torch.Size([1, 400] )
lowercase__ = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowercase__ = torch.Size([1, 400] )
lowercase__ = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
lowercase__ = torch.Size([1, 400] )
lowercase__ = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
lowercase__ = torch.Size([1, 1408, 1536] )
lowercase__ = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowercase__ = torch.Size([1, 174] )
lowercase__ = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
lowercase__ = torch.Size([1, 1408, 1536] )
lowercase__ = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowercase__ = torch.Size([1, 174] )
lowercase__ = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
lowercase__ = outputs.loss
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""nielsr""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 164 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase__ = random.Random()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
if rng is None:
a__: Any = global_rng
a__: int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=4_00 , lowercase=20_00 , lowercase=1 , lowercase=0.0 , lowercase=1_60_00 , lowercase=True , lowercase=True , ) -> Union[str, Any]:
'''simple docstring'''
a__: Tuple = parent
a__: Optional[int] = batch_size
a__: Optional[Any] = min_seq_length
a__: Optional[int] = max_seq_length
a__: Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__: Dict = feature_size
a__: Any = padding_value
a__: Optional[Any] = sampling_rate
a__: Optional[Any] = return_attention_mask
a__: str = do_normalize
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self , lowercase=False , lowercase=False) -> Tuple:
'''simple docstring'''
def _flatten(lowercase):
return list(itertools.chain(*lowercase))
if equal_length:
a__: Dict = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
a__: List[Any] = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
a__: str = [np.asarray(lowercase) for x in speech_inputs]
return speech_inputs
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = WavaVecaFeatureExtractor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[int] = WavaVecaFeatureExtractionTester(self)
def lowerCamelCase_ ( self , lowercase) -> List[Any]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowercase , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0) - 1) < 1e-3))
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
a__: Optional[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: List[str] = [np.asarray(lowercase) for speech_input in speech_inputs]
# Test not batched input
a__: Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='np').input_values
a__: Dict = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test batched
a__: Dict = feat_extract(lowercase , return_tensors='np').input_values
a__: int = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test 2-D numpy arrays are batched.
a__: int = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
a__: Union[str, Any] = np.asarray(lowercase)
a__: int = feat_extract(lowercase , return_tensors='np').input_values
a__: Any = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: List[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Optional[int] = ['longest', 'max_length', 'do_not_pad']
a__: List[Any] = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: Dict = feat_extract(lowercase , padding=lowercase , max_length=lowercase , return_tensors='np')
a__: Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self.assertTrue(input_values[0][8_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self.assertTrue(input_values[0][10_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Optional[int] = range(8_00 , 14_00 , 2_00)
a__: List[str] = [floats_list((1, x))[0] for x in lengths]
a__: Tuple = ['longest', 'max_length', 'do_not_pad']
a__: Dict = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: int = feat_extract(lowercase , max_length=lowercase , padding=lowercase)
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Any = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Dict = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='max_length' , return_tensors='np')
a__: int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: int = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: str = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='longest' , return_tensors='np')
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00))
a__: Dict = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Tuple = feat_extract(
lowercase , truncation=lowercase , max_length=20_00 , padding='longest' , return_tensors='np')
a__: str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00))
@require_torch
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
import torch
a__: Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Tuple = np.random.rand(1_00).astype(np.floataa)
a__: Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__: Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
a__: Optional[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
@slow
@require_torch
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
a__: str = WavaVecaConfig.from_pretrained(lowercase)
a__: str = WavaVecaFeatureExtractor.from_pretrained(lowercase)
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
| 290 | 0 |
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # the `stack[-1] != "("` guard avoids a KeyError, since "(" has no
                # entry in the priority table
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
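# Worked example (computed by hand, not part of the original file): for the
# infix expression "a+b*(c^d-e)", the table-driven run above yields the postfix
# form "abcd^e-*+", and infix_2_prefix returns the prefix form "+a*b-^cde"
# (reverse the input, swap the brackets, take the postfix, reverse the result).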
| 42 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
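# Usage sketch (not part of the original module): constructing a narrower
# variant of the config and its ONNX export helper.
#
#   config = MobileNetV1Config(depth_multiplier=0.75)
#   onnx_config = MobileNetV1OnnxConfig(config)
#   assert onnx_config.atol_for_validation == 1e-4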
| 42 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
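# Sanity sketch (not in the original script): each "reverse" helper undoes the
# [0, 2, 1, 3] channel-group permutation applied by its "correct" counterpart,
# so a round trip is the identity:
#
#   x = torch.arange(8).float()  # in_channel = 8
#   assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(x)), x)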
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
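# Example invocation (script filename and output path are illustrative, not
# taken from this repo):
#   python convert_swin_upernet_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny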
| 7 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the Gamma function: Gamma(num) = integral from 0 to inf of x^(num - 1) * e^(-x) dx."""
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
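# Sanity values (standard facts about Gamma, not original to this file):
# Gamma interpolates the factorial, Gamma(n) = (n - 1)!, so:
#   gamma(5)   -> ~24.0           (= 4!)
#   gamma(0.5) -> ~1.7724538509   (= sqrt(pi))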
if __name__ == "__main__":
from doctest import testmod
testmod()
| 158 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
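# The attribute_map aliases legacy attribute names onto the standard ones, e.g.:
#   config = ErnieMConfig()
#   config.num_classes   # resolves to config.num_labels via attribute_map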
| 352 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
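# Example invocation (script and file names are illustrative, not from this repo):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny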
| 102 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()

            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10_000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """Forces an exact sequence of tokens to appear in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    """A helper that stores the candidate phrases of a disjunctive constraint in a trie."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """A constraint that is fulfilled by any one of several candidate phrases."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    """Tracks a beam's progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never mutate self.constraints
        # throughout this process, so the copy starts at the initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
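# Minimal usage sketch (not part of the module): force the phrase [5, 9, 2]
# during generation by stepping a PhrasalConstraint token by token.
#
#   constraint = PhrasalConstraint([5, 9, 2])
#   constraint.advance()   # -> 5, the next token needed
#   constraint.update(5)   # -> (stepped=True, completed=False, reset=False)
#   constraint.update(9)
#   constraint.update(2)   # -> (True, True, False); constraint.completed is now True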
| 291 |
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
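# Why the check in is_pentagonal works: solving P = n(3n - 1) / 2 for n gives
# n = (1 + sqrt(1 + 24P)) / 6, so P is pentagonal exactly when that expression
# is a whole number. With the default limit, solution() should return 5482660,
# the known Project Euler 44 answer.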
if __name__ == "__main__":
print(F"""{solution() = }""")
| 291 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 365 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
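# Usage sketch (dataset features are hypothetical): aligning the template with
# a dataset swaps the generic ClassLabel for the dataset's own label feature.
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   template = AudioClassification(label_column="labels")
#   aligned = template.align_with_features(features)
#   aligned.label_schema["labels"]   # -> ClassLabel(names=["cat", "dog"])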
| 169 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 207 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
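# Usage sketch (the checkpoint id is a public model; output shown schematically):
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tok("hello world")["input_ids"]   # [CLS] hello world [SEP], as vocabulary ids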
| 207 | 1 |
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 354 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,  # assumed: the obfuscated source hid these two flags
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_stable_unclip_img2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
_lowercase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
_lowercase : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_lowercase : int = torch.Generator(device="cpu" ).manual_seed(0 )
_lowercase : List[Any] = pipe(_UpperCamelCase , "anime turle" , generator=_UpperCamelCase , output_type="np" )
_lowercase : str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
_lowercase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
_lowercase : Any = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_lowercase : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
_lowercase : Any = pipe(_UpperCamelCase , "anime turle" , generator=_UpperCamelCase , output_type="np" )
_lowercase : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            init_image, "anime turtle", num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
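# A minimal sketch of using the pipeline exercised above outside the test
# harness; checkpoint, image URL and prompt come from the tests, the rest
# (fp16, memory-saving calls) is an assumption, not the only valid setup:
#
# import torch
# from diffusers.utils import load_image
#
# pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
#     "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
# )
# pipe.enable_attention_slicing()       # lowers peak memory at some speed cost
# pipe.enable_sequential_cpu_offload()  # keeps only the active module on the GPU
# init_image = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
# )
# image = pipe(init_image, "anime turtle", output_type="np").images[0]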
| 199 | 0 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # maps a placeholder token to the list of tokens it expands to
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer.")

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}. Keep placeholder tokens independent.")
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )
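# A minimal usage sketch for the tokenizer above. The checkpoint id is an
# assumption for illustration; any CLIP tokenizer checkpoint works the same
# way. With num_vec_per_token=3, "<cat-toy>" is expanded to
# "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2" before the normal CLIP encoding runs.
#
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
# tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=3)
# ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)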
| 141 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 141 | 1 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 278 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 278 | 1 |
'''simple docstring'''
from math import ceil, sqrt
def solution(limit: int = 100_0000) -> int:
    """Counts hollow square laminae that can be formed with up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
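# A brute-force cross-check of the counting formula above, workable only for
# small limits since it enumerates every (outer, hole) square pair directly:
def _brute_force_laminae(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # thinnest possible lamina for this outer width
        hole = outer - 2
        while hole >= 1 and outer * outer - hole * hole <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count


assert _brute_force_laminae(1000) == solution(1000)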
if __name__ == "__main__":
print(F"""{solution() = }""")
| 79 |
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    """Trial division using the fact that every prime > 3 has the form 6k +/- 1."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
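# A quick cross-check of the 6k +/- 1 trial division against a plain sieve of
# Eratosthenes over the first few hundred integers:
def _sieve(limit: int) -> list:
    flags = [True] * (limit + 1)
    flags[0] = flags[1] = False
    for i in range(2, int(math.sqrt(limit)) + 1):
        if flags[i]:
            for j in range(i * i, limit + 1, i):
                flags[j] = False
    return flags


_flags = _sieve(500)
assert all(is_prime(n) == _flags[n] for n in range(501))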
class UpperCamelCase__ ( unittest.TestCase):
    def test_primes(self):
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
'''simple docstring'''
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 161 | 0 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
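# Both checks are meant to run from the repository root as a CI lint step,
# e.g. (the utils/ path is an assumption):
#
#     python utils/check_inits.py
#
# check_all_inits() raises on any __init__.py whose _import_structure and
# TYPE_CHECKING halves disagree; check_submodules() raises on submodules
# missing from the main init.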
| 358 |
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")

    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")

    if base in (0, 1):
        raise ValueError("base must be >= 2")

    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]
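# A few spot checks (the exhaustive round-trip in the __main__ block below
# covers every base from 2 to 36):
assert decimal_to_any(0, 2) == "0"
assert decimal_to_any(5, 4) == "11"
assert decimal_to_any(255, 16) == "FF"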
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 255 | 0 |
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Adds the two children of curr_string to the lexicon, widening codes at powers of two."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compresses the given bit string using a Lempel-Ziv dictionary scheme."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepends a self-delimiting header with the original file size in bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Packs the bit string back into bytes, adding a 1-then-zeros padding marker."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
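# The header prepended by add_file_length is self-delimiting: the binary file
# size is preceded by one fewer zeros than it has bits, so a reader can
# recover the size by counting the leading zeros. A tiny illustration of the
# framing (1000 is an arbitrary example size):
_size_bits = bin(1000)[2:]  # "1111101000"
_header = "0" * (len(_size_bits) - 1) + _size_bits
assert len(_header) == 2 * len(_size_bits) - 1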
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 88 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
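# 1406357289 is a well-known 0-9 pandigital with this substring-divisibility
# property (Project Euler 43), so it makes a cheap spot check:
assert is_substring_divisible(tuple(int(d) for d in "1406357289"))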
if __name__ == "__main__":
print(F'''{solution() = }''')
| 327 | 0 |
"""simple docstring"""
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
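# Spot checks against the classical partition numbers p(n):
# p(1) = 1, p(5) = 7, p(10) = 42.
assert partition(1) == 1
assert partition(5) == 7
assert partition(10) == 42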
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__UpperCAmelCase = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 362 |
"""simple docstring"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest: float = 0
    result: int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
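# Comparing x * log10(a) ranks a**x without materializing the huge powers;
# for instance, on the example pair from the Project Euler 99 statement,
# 632382**518061 > 519432**525806:
assert 525806 * log10(519432) < 518061 * log10(632382)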
if __name__ == "__main__":
print(solution())
| 1 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
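# With the lazy structure above, `import transformers` stays cheap: the torch
# and vision submodules are only imported when one of the listed names is
# first accessed, e.g. `from transformers import Mask2FormerConfig`.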
| 85 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
                }),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
| 138 | 0 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
__lowerCamelCase : Optional[Any] = 10
while i <= 10_0000:
print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
i *= 10
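# Analytic cross-check: the integral of x^3 + x^2 over [-5, 5] is
# [x^4/4 + x^3/3] evaluated at the bounds = 250/3 ~ 83.33 (the odd x^3 term
# cancels), and the trapezoidal estimate converges to it quadratically:
assert abs(trapezoidal_area(lambda x: x**3 + x**2, -5, 5, 10_000) - 250 / 3) < 0.01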
| 286 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 286 | 1 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
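# normalize_box rescales pixel boxes to the 0-1000 grid that LayoutLM-style
# models expect; e.g. a box covering the right half of an 800x600 page:
assert normalize_box([400, 0, 800, 600], 800, 600) == [500, 0, 1000, 1000]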
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
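# A minimal usage sketch for the processor above (document_image is a
# placeholder variable; apply_ocr=False skips the pytesseract dependency):
#
# processor = LayoutLMv2ImageProcessor(apply_ocr=False)
# encoding = processor(images=document_image, return_tensors="pt")
# encoding.pixel_values.shape  # (batch, 3, 224, 224), channels flipped to BGR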
| 276 |
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
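# Known dates as sanity checks: the year 2000 anchors on a Tuesday, and
# 2000-01-01 fell on a Saturday; 2023-06-19 was a Monday.
assert get_week_day(2000, 1, 1) == "Saturday"
assert get_week_day(2023, 6, 19) == "Monday"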
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
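# Example invocation (script and file names are placeholders):
#
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path model.pkl \
#         --config_file config.json \
#         --pytorch_dump_path pytorch_model.bin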
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 371 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.")
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}")
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file", default=default_json_config_file, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), dest="save_location")
    parser.add_argument(
        "--mixed_precision", choices=["no", "fp16", "bf16"], type=str, help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", default="no")
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
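# A minimal usage sketch of the helper above (the destination path is a
# hypothetical placeholder):
#
#   saved_path = write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_default.json")
#   if saved_path:
#       print(f"wrote {saved_path}")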
| 93 | 0 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build streaming dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
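# A minimal usage sketch, assuming a plain-text file at a hypothetical path:
#
#   reader = TextDatasetReader("/tmp/corpus.txt", keep_in_memory=True)
#   dataset = reader.read()
#   print(dataset)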
| 187 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
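# A quick sanity check of the index mapping with pure NumPy (no OpenCV needed):
#
#   img = np.arange(16, dtype=np.uint8).reshape(4, 4, 1).repeat(3, axis=2)
#   n = NearestNeighbour(img, 2, 2)
#   n.process()
#   # ratio_x = ratio_y = 2, so output pixel (i, j) copies source pixel (2*i, 2*j)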
| 270 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False)
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False)
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False)
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
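# A minimal standalone sketch, outside the unittest harness, of the same API
# the tests above exercise (tiny model, CPU-friendly; prints a results object):
#
#   args = TensorFlowBenchmarkArguments(models=["sshleifer/tiny-gpt2"], inference=True, training=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
#   print(TensorFlowBenchmark(args).run())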
| 364 |
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Calculate the kinetic energy of an object: KE = mass * velocity**2 / 2.

    >>> kinetic_energy(10, 10)
    500.0
    >>> kinetic_energy(2, -3)
    9.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 223 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
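# A minimal usage sketch (downloads the real "camembert-base" vocab referenced
# in PRETRAINED_VOCAB_FILES_MAP above):
#
#   tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#   ids = tokenizer("J'aime le camembert !")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))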
| 339 |
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown by Google Scholar for the given lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 339 | 1 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}")


def run_command_factory(args):
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device)
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format, output_path=args.output, input_path=args.input, column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite)
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)")
        run_parser.add_argument(
            "--column", type=str, help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)")
        run_parser.add_argument(
            "--format", type=str, default="infer", choices=PipelineDataFormat.SUPPORTED_FORMATS, help="Input format to read from")
        run_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)")
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
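# A minimal CLI invocation sketch for the subcommand above (the input and
# output file names are hypothetical placeholders):
#
#   transformers-cli run --task text-classification \
#       --input ./reviews.csv --column review --format csv --output ./predictions.json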
| 357 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports passing dicts, lists of dicts, generators and datasets directly
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
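# A minimal usage sketch via the pipeline factory (the image path is a
# hypothetical placeholder; the default VQA checkpoint is downloaded on first use):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering")
#   print(vqa(image="./cats.jpg", question="How many cats are there?", top_k=2))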
| 7 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy")
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 42 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'''{solution() = }''')
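# Background for the loop above: Euclid's formula generates every primitive
# Pythagorean triple as a = m^2 - n^2, b = 2mn, c = m^2 + n^2 with m > n > 0,
# gcd(m, n) = 1 and m, n of opposite parity. The perimeter is then
# a + b + c = 2m(m + n), which is exactly `primitive_perimeter`; every multiple
# of it is the perimeter of a scaled copy of the same triangle. For example
# m = 2, n = 1 gives (3, 4, 5) with perimeter 2*2*(2+1) = 12.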
| 42 | 1 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 182 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()
        output = pipeline(
            image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 182 | 1 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map))

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
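# A minimal usage sketch: project a hidden state to Student-T parameters and
# build the distribution (the batch size and feature width are arbitrary
# illustration values):
#
#   output = StudentTOutput(dim=1)
#   proj = output.get_parameter_projection(in_features=16)
#   hidden = torch.randn(8, 16)
#   distr = output.distribution(proj(hidden))
#   sample = distr.sample()  # shape (8,)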
| 342 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)
            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)
            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)
        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)
            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
| 102 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ) -> List[str]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_input_mask
UpperCamelCase = use_labels
UpperCamelCase = use_mc_token_ids
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
UpperCamelCase = self.vocab_size - 1
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
if self.use_mc_token_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
UpperCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def A__ ( self ) -> int:
"""simple docstring"""
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase = CTRLModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ )
model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase = CTRLLMHeadModel(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase = config_and_inputs
UpperCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = CTRLForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowercase = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowercase = (CTRLLMHeadModel,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = CTRLModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , n_embd=37 )
def A__ ( self ) -> Any:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*SCREAMING_SNAKE_CASE_ )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self ) -> Dict:
"""simple docstring"""
pass
@slow
def A__ ( self ) -> List[Any]:
"""simple docstring"""
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = CTRLModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def A__ ( self ) -> List[str]:
"""simple docstring"""
pass
@require_torch
class a_ ( unittest.TestCase ):
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) # Legal the president is
UpperCamelCase = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCamelCase = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE_ )
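# For reference, `generate(..., do_sample=False)` above is greedy decoding. A
# bare-bones sketch of that loop (no KV caching, no EOS handling; `model` is
# any causal LM returning `.logits`):
def greedy_decode(model, input_ids, max_new_tokens=16):
    for _ in range(max_new_tokens):
        logits = model(input_ids).logits            # (batch, seq_len, vocab)
        next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)
        input_ids = torch.cat([input_ids, next_token], dim=-1)
    return input_ids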
| 355 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-1'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-2'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-3'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-4'
class a_ ( lowerCamelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , ) -> int:
"""simple docstring"""
        super().__init__()
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline(
vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , requires_safety_checker=_SCREAMING_SNAKE_CASE , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def A__ ( self ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , _SCREAMING_SNAKE_CASE ) for k in self.config.keys() if not k.startswith("""_""" )}
def A__ ( self , _SCREAMING_SNAKE_CASE = "auto" ) -> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
self.enable_attention_slicing(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
UpperCamelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(_SCREAMING_SNAKE_CASE )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
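# A hedged usage sketch for a comparison pipeline like the one above. Loading
# community pipelines through the `custom_pipeline` argument is standard in
# diffusers, but the pipeline identifier below is an assumption, not a
# verified name.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",  # assumed community pipeline id
)
pipe.enable_attention_slicing()
output = pipe(prompt="a photograph of an astronaut riding a horse", num_inference_steps=25)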
| 183 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class a__ ( A__ ):
def __init__( self : int,_A : AutoencoderKL,_A : CLIPTextModel,_A : CLIPTokenizer,_A : UNetaDConditionModel,_A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],_A : StableDiffusionSafetyChecker,_A : CLIPImageProcessor,):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=_A,text_encoder=_A,tokenizer=_A,unet=_A,scheduler=_A,safety_checker=_A,feature_extractor=_A,)
def __UpperCamelCase ( self : Tuple,_A : Optional[Union[str, int]] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE_ : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.enable_attention_slicing(_A )
@torch.no_grad()
def __call__( self : int,_A : Union[str, List[str]],_A : int = 512,_A : int = 512,_A : int = 50,_A : float = 7.5,_A : Optional[Union[str, List[str]]] = None,_A : Optional[int] = 1,_A : float = 0.0,_A : Optional[torch.Generator] = None,_A : Optional[torch.FloatTensor] = None,_A : Optional[str] = "pil",_A : bool = True,_A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None,_A : int = 1,_A : Optional[torch.FloatTensor] = None,**_A : List[str],):
"""simple docstring"""
if isinstance(_A,_A ):
SCREAMING_SNAKE_CASE_ : Tuple = 1
elif isinstance(_A,_A ):
SCREAMING_SNAKE_CASE_ : str = len(_A )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(_A )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A,_A ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(_A )}.' )
# get prompt text embeddings
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer(
_A,padding="max_length",max_length=self.tokenizer.model_max_length,return_tensors="pt",)
SCREAMING_SNAKE_CASE_ : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
SCREAMING_SNAKE_CASE_ : List[str] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = text_embeddings.shape
SCREAMING_SNAKE_CASE_ : Optional[Any] = text_embeddings.repeat(1,_A,1 )
SCREAMING_SNAKE_CASE_ : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt,_A,-1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE_ : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ : List[str] = 42
if negative_prompt is None:
SCREAMING_SNAKE_CASE_ : int = [""]
elif type(_A ) is not type(_A ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(_A )} !='
F' {type(_A )}.' )
elif isinstance(_A,_A ):
SCREAMING_SNAKE_CASE_ : Optional[int] = [negative_prompt]
elif batch_size != len(_A ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(_A )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
" the batch size of `prompt`." )
else:
SCREAMING_SNAKE_CASE_ : Dict = negative_prompt
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer(
_A,padding="max_length",max_length=_A,truncation=_A,return_tensors="pt",)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE_ : Dict = uncond_embeddings.shape[1]
SCREAMING_SNAKE_CASE_ : Tuple = uncond_embeddings.repeat(_A,_A,1 )
SCREAMING_SNAKE_CASE_ : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt,_A,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE_ : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE_ : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE_ : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
SCREAMING_SNAKE_CASE_ : Any = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
SCREAMING_SNAKE_CASE_ : str = torch.randn(
_A,generator=_A,device="cpu",dtype=_A ).to(self.device )
SCREAMING_SNAKE_CASE_ : Any = torch.randn(_A,generator=_A,device="cpu",dtype=_A ).to(
self.device )
else:
SCREAMING_SNAKE_CASE_ : Any = torch.randn(
_A,generator=_A,device=self.device,dtype=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.randn(_A,generator=_A,device=self.device,dtype=_A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
SCREAMING_SNAKE_CASE_ : str = latents_reference.to(self.device )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
SCREAMING_SNAKE_CASE_ : Optional[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
SCREAMING_SNAKE_CASE_ : str = (latents_shape[2] - latents_shape_reference[2]) // 2
SCREAMING_SNAKE_CASE_ : Any = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
SCREAMING_SNAKE_CASE_ : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
SCREAMING_SNAKE_CASE_ : int = 0 if dx < 0 else dx
SCREAMING_SNAKE_CASE_ : str = 0 if dy < 0 else dy
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max(-dx,0 )
SCREAMING_SNAKE_CASE_ : Tuple = max(-dy,0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_A )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE_ : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE_ : Dict = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
if accepts_eta:
SCREAMING_SNAKE_CASE_ : Optional[int] = eta
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.scheduler.scale_model_input(_A,_A )
# predict the noise residual
SCREAMING_SNAKE_CASE_ : Dict = self.unet(_A,_A,encoder_hidden_states=_A ).sample
# perform guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_ : int = self.scheduler.step(_A,_A,_A,**_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A,_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = 1 / 0.18215 * latents
SCREAMING_SNAKE_CASE_ : Dict = self.vae.decode(_A ).sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (image / 2 + 0.5).clamp(0,1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE_ : str = image.cpu().permute(0,2,3,1 ).float().numpy()
if self.safety_checker is not None:
SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor(self.numpy_to_pil(_A ),return_tensors="pt" ).to(
self.device )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.safety_checker(
images=_A,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
SCREAMING_SNAKE_CASE_ : int = None
if output_type == "pil":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.numpy_to_pil(_A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_A,nsfw_content_detected=_A )
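# A minimal sketch of the centered-overlap trick in the latents_reference branch
# above, for the dx, dy >= 0 case only (shapes are illustrative):
import torch

ref = torch.randn(1, 4, 64, 64)            # latents for a 512x512 reference image
tgt = torch.randn(1, 4, 64, 96)            # latents for a 512x768 target image
dx = (tgt.shape[3] - ref.shape[3]) // 2    # 16
dy = (tgt.shape[2] - ref.shape[2]) // 2    # 0
tgt[:, :, dy : dy + ref.shape[2], dx : dx + ref.shape[3]] = ref
# the shared 64x64 center now carries identical seed-derived noise at both
# sizes, which is what makes same-seed generations at different resolutions
# look similar.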
| 18 |
def lowerCAmelCase ( _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not numbers:
return 0
if not isinstance(_lowerCAmelCase , (list, tuple) ) or not all(
isinstance(_lowerCAmelCase , _lowerCAmelCase ) for number in numbers ):
raise ValueError("numbers must be an iterable of integers" )
UpperCAmelCase__ = UpperCAmelCase__ = UpperCAmelCase__ = numbers[0]
for i in range(1 , len(_lowerCAmelCase ) ):
# update the maximum and minimum subarray products
UpperCAmelCase__ = numbers[i]
if number < 0:
UpperCAmelCase__ , UpperCAmelCase__ = min_till_now, max_till_now
UpperCAmelCase__ = max(_lowerCAmelCase , max_till_now * number )
UpperCAmelCase__ = min(_lowerCAmelCase , min_till_now * number )
# update the maximum product found till now
UpperCAmelCase__ = max(_lowerCAmelCase , _lowerCAmelCase )
return max_prod
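# A self-contained, readable restatement of the routine above (same algorithm,
# descriptive names). The key step is the swap: multiplying by a negative number
# turns the running maximum into a candidate minimum and vice versa, so both
# extremes must be tracked at every position.
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0
    max_now = min_now = best = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            max_now, min_now = min_now, max_now
        max_now = max(number, max_now * number)
        min_now = min(number, min_now * number)
        best = max(best, max_now)
    return best

assert max_product_subarray([2, 3, -2, 4]) == 6   # best subarray is [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0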
| 169 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
__lowerCAmelCase = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
__lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __a :
__lowercase : Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
__lowercase : Optional[str] = field(default=__UpperCamelCase , metadata={'help': 'A folder containing the training data.'} )
__lowercase : Optional[str] = field(default=__UpperCamelCase , metadata={'help': 'A folder containing the validation data.'} )
__lowercase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__lowercase : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
__lowercase : float = field(
default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
__lowercase : Optional[int] = field(
default=__UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowercase : Optional[int] = field(
default=__UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: str = {}
if self.train_dir is not None:
lowercase__: List[Any] = self.train_dir
if self.validation_dir is not None:
lowercase__: Optional[Any] = self.validation_dir
lowercase__: Any = data_files if data_files else None
@dataclass
class __a :
__lowercase : str = field(
default=__UpperCamelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
'checkpoint identifier on the hub. '
'Don\'t set if you want to train a model from scratch.'
)
} , )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__UpperCamelCase )} , )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
__lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowercase : str = field(default=__UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__lowercase : bool = field(
default=__UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__lowercase : Optional[int] = field(
default=__UpperCamelCase , metadata={
'help': (
'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
)
} , )
__lowercase : Optional[int] = field(
default=__UpperCamelCase , metadata={
'help': (
'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
)
} , )
__lowercase : Optional[int] = field(
default=__UpperCamelCase , metadata={'help': 'Stride to use for the encoder.'} , )
class __a :
def __init__( self , lowerCAmelCase__=192 , lowerCAmelCase__=32 , lowerCAmelCase__=4 , lowerCAmelCase__=0.6 ) -> Tuple:
'''simple docstring'''
lowercase__: Dict = input_size
lowercase__: List[str] = mask_patch_size
lowercase__: Dict = model_patch_size
lowercase__: int = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
lowercase__: Union[str, Any] = self.input_size // self.mask_patch_size
lowercase__: List[str] = self.mask_patch_size // self.model_patch_size
lowercase__: Optional[Any] = self.rand_size**2
lowercase__: Dict = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: List[str] = np.random.permutation(self.token_count )[: self.mask_count]
lowercase__: Dict = np.zeros(self.token_count , dtype=lowerCAmelCase__ )
lowercase__: str = 1
lowercase__: List[Any] = mask.reshape((self.rand_size, self.rand_size) )
lowercase__: Union[str, Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
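# A quick shape check of the generator above with its default arguments
# (input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
#   rand_size   = 192 // 32 = 6, so masking decisions are made on a 6x6 grid
#   scale       = 32 // 4   = 8, each decision is broadcast to an 8x8 block
#   token_count = 6 * 6     = 36, mask_count = ceil(36 * 0.6) = 22
# The flattened mask therefore has (6*8) * (6*8) = 2304 entries, one per
# 4x4-pixel model patch, of which 22 * 64 = 1408 are masked.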
def snake_case_ ( snake_case ) -> int:
lowercase__: List[str] = torch.stack([example['pixel_values'] for example in examples] )
lowercase__: Optional[int] = torch.stack([example['mask'] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def snake_case_ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__: Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__: Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__: List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim' , snake_case , snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__: List[str] = training_args.get_process_log_level()
logger.setLevel(snake_case )
transformers.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowercase__: Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__: Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
lowercase__: List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__: int = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0:
lowercase__: Union[str, Any] = ds['train'].train_test_split(data_args.train_val_split )
lowercase__: List[str] = split['train']
lowercase__: Any = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__: Optional[Any] = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
lowercase__: Union[str, Any] = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case )
elif model_args.model_name_or_path:
lowercase__: List[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case )
else:
lowercase__: Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(snake_case , 'decoder_type' ):
lowercase__: int = 'simmim'
# adapt config
lowercase__: Optional[Any] = model_args.image_size if model_args.image_size is not None else config.image_size
lowercase__: Any = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowercase__: List[str] = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowercase__: Tuple = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case )
elif model_args.model_name_or_path:
lowercase__: List[str] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case )
else:
lowercase__: List[Any] = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowercase__: Dict = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowercase__: Tuple = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
lowercase__: Dict = AutoModelForMaskedImageModeling.from_config(snake_case )
if training_args.do_train:
lowercase__: List[Any] = ds['train'].column_names
else:
lowercase__: List[Any] = ds['validation'].column_names
if data_args.image_column_name is not None:
lowercase__: Optional[Any] = data_args.image_column_name
elif "image" in column_names:
lowercase__: Optional[Any] = 'image'
elif "img" in column_names:
lowercase__: List[str] = 'img'
else:
lowercase__: Optional[Any] = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowercase__: Any = Compose(
[
Lambda(lambda snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.6_7, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowercase__: Tuple = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(snake_case ):
lowercase__: str = [transforms(snake_case ) for image in examples[image_column_name]]
lowercase__: List[Any] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
lowercase__: Any = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
lowercase__: Optional[int] = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(snake_case )
# Initialize our trainer
lowercase__: Union[str, Any] = Trainer(
model=snake_case , args=snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , )
# Training
if training_args.do_train:
lowercase__: int = None
if training_args.resume_from_checkpoint is not None:
lowercase__: Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__: Union[str, Any] = last_checkpoint
lowercase__: int = trainer.train(resume_from_checkpoint=snake_case )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__: Dict = trainer.evaluate()
trainer.log_metrics('eval' , snake_case )
trainer.save_metrics('eval' , snake_case )
# Write model card and (optionally) push to hub
lowercase__: List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case )
else:
trainer.create_model_card(**snake_case )
if __name__ == "__main__":
main()
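# Example invocation (paths and hyperparameters below are illustrative):
#   python run_mim.py \
#     --dataset_name cifar10 \
#     --output_dir ./simmim-output \
#     --do_train --do_eval \
#     --mask_ratio 0.6 \
#     --mask_patch_size 32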
| 288 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__lowerCAmelCase = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCAmelCase = '''\
ChrF and ChrF++ are two MT evaluation metrics. Both use the F-score statistic over character n-gram matches,
and ChrF++ additionally includes word n-grams, which correlate more strongly with direct assessment. We use the
implementation that is already present in sacrebleu.
The implementation here differs slightly from sacrebleu in the required input format: every prediction must have
the same number of references, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
__lowerCAmelCase = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = CHRF.CHAR_ORDER , lowerCAmelCase__ = CHRF.WORD_ORDER , lowerCAmelCase__ = CHRF.BETA , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , ) -> List[Any]:
'''simple docstring'''
lowercase__: str = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
lowercase__: List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
lowercase__: Union[str, Any] = CHRF(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: str = sb_chrf.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
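# The reference transpose performed in _compute above, spelled out on toy data.
# chrF itself is the usual F-score, (1 + beta^2) * P * R / (beta^2 * P + R),
# computed over character (and optionally word) n-gram matches.
predictions = ["hyp one", "hyp two"]
references = [["ref 1a", "ref 1b"], ["ref 2a", "ref 2b"]]  # one sub-list per prediction
transformed = [[refs[i] for refs in references] for i in range(len(references[0]))]
assert transformed == [["ref 1a", "ref 2a"], ["ref 1b", "ref 2b"]]  # one stream per reference set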
| 288 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
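# A minimal sketch of the lazy-import pattern behind _LazyModule, using the
# PEP 562 module-level __getattr__ hook. The mapping and names below are
# illustrative, not transformers' actual implementation.
import importlib

_LAZY = {"TrOCRProcessor": ".processing_trocr"}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")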
| 0 |
from sklearn.metrics import mean_squared_error
import datasets
lowerCamelCase = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
lowerCamelCase = '\\nMean Squared Error (MSE) is the average of the squared differences between the predicted\nand actual values.\n'
lowerCamelCase = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowerCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def lowerCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def lowerCamelCase ( self : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[str]=None , lowercase_ : Union[str, Any]="uniform_average" , lowercase_ : Tuple=True ) -> Any:
"""simple docstring"""
_lowerCamelCase : List[str] =mean_squared_error(
lowercase_ , lowercase_ , sample_weight=lowercase_ , multioutput=lowercase_ , squared=lowercase_ )
return {"mse": mse}
| 199 | 0 |
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE_: List[str] =getLogger(__name__)
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : str , snake_case_ : int = 8 , snake_case_ : int = 10_24 , snake_case_ : List[Any]="val" , snake_case_ : int=None , snake_case_ : Tuple=False , snake_case_ : Optional[Any]="summarization" , snake_case_ : Optional[Any]=None , snake_case_ : Any=1 , snake_case_ : Dict = None , snake_case_ : Union[str, Any]="" , **snake_case_ : Any , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = str(snake_case_ )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=snake_case_ )
UpperCAmelCase_ = Path(snake_case_ )
UpperCAmelCase_ = save_dir.joinpath(f"""rank_{local_rank}_output.json""" )
torch.cuda.set_device(snake_case_ )
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda()
if fpaa:
UpperCAmelCase_ = model.half()
# determine if we need to increase num_beams
use_task_specific_params(snake_case_ , snake_case_ ) # update config with task specific params
UpperCAmelCase_ = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase_ = num_return_sequences
UpperCAmelCase_ = AutoTokenizer.from_pretrained(snake_case_ )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase_ = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase_ = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase_ = SeqaSeqDataset(
snake_case_ , snake_case_ , snake_case_ , max_target_length=10_24 , type_path=snake_case_ , n_obs=snake_case_ , prefix=snake_case_ , **snake_case_ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase_ = ds.make_sortish_sampler(snake_case_ , distributed=snake_case_ , add_extra_examples=snake_case_ , shuffle=snake_case_ )
UpperCAmelCase_ = DataLoader(snake_case_ , sampler=snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn )
UpperCAmelCase_ = []
for batch in tqdm(snake_case_ ):
UpperCAmelCase_ = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=snake_case_ , num_beams=snake_case_ , **snake_case_ , )
UpperCAmelCase_ = tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
UpperCAmelCase_ = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase_ = chunks(snake_case_ , snake_case_ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(snake_case_ ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(snake_case_ , snake_case_ )
return results, sampler.num_replicas
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=snake_case_ , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=snake_case_ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=snake_case_ , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=snake_case_ , default=snake_case_ )
parser.add_argument(
"--type_path" , type=snake_case_ , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=snake_case_ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=snake_case_ , default=8 , required=snake_case_ , help="batch size" )
parser.add_argument(
"--local_rank" , type=snake_case_ , default=-1 , required=snake_case_ , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=snake_case_ , default=snake_case_ , required=snake_case_ , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=snake_case_ , default=1 , required=snake_case_ , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=snake_case_ , default=6_00 , required=snake_case_ , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=snake_case_ , default=snake_case_ , required=snake_case_ )
parser.add_argument("--tgt_lang" , type=snake_case_ , default=snake_case_ , required=snake_case_ )
parser.add_argument(
"--prefix" , type=snake_case_ , required=snake_case_ , default=snake_case_ , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase_ = time.time()
UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_known_args()
UpperCAmelCase_ = parse_numeric_n_bool_cl_kwargs(snake_case_ )
if generate_kwargs and args.local_rank <= 0:
print(f"""parsed the following generate kwargs: {generate_kwargs}""" )
UpperCAmelCase_ = Path(args.save_dir + "_tmp" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking.
UpperCAmelCase_ = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase_ = {}
if args.src_lang is not None:
UpperCAmelCase_ = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase_ = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = eval_data_dir(
args.data_dir , snake_case_ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=snake_case_ , **snake_case_ , )
if args.local_rank <= 0:
UpperCAmelCase_ = Path(args.save_dir )
save_dir.mkdir(exist_ok=snake_case_ )
UpperCAmelCase_ = gather_results_from_each_node(snake_case_ , snake_case_ , args.sync_timeout )
UpperCAmelCase_ = combine_partial_results(snake_case_ )
if args.num_return_sequences > 1:
UpperCAmelCase_ = save_dir.joinpath("pseudolabel_results.json" )
print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(snake_case_ , snake_case_ )
return
UpperCAmelCase_ = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(snake_case_ ) as f:
UpperCAmelCase_ = [x.rstrip() for x in f.readlines()][: len(snake_case_ )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase_ = "translation" in args.task
UpperCAmelCase_ = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase_ = "bleu" if calc_bleu else "rouge"
UpperCAmelCase_ = score_fn(snake_case_ , snake_case_ )
UpperCAmelCase_ = len(snake_case_ )
UpperCAmelCase_ = time.time() - start_time
UpperCAmelCase_ = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase_ = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase_ = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" )
save_json(snake_case_ , snake_case_ , indent=snake_case_ )
print(snake_case_ )
write_txt_file(snake_case_ , save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(snake_case_ , save_dir.joinpath(f"""{args.type_path}.target""" ) )
else:
shutil.rmtree(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int ) -> List:
'''simple docstring'''
UpperCAmelCase_ = []
for partial_result in partial_results:
records.extend(snake_case_ )
UpperCAmelCase_ = sorted(snake_case_ , key=lambda snake_case_ : x["id"] )
UpperCAmelCase_ = [x["pred"] for x in records]
return preds
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Any ) -> List[Dict[str, List]]:
'''simple docstring'''
UpperCAmelCase_ = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase_ = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase_ = list(save_dir.glob("rank_*.json" ) )
if len(snake_case_ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase_ = lmap(snake_case_ , snake_case_ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 106 | '''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_: Any =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
SCREAMING_SNAKE_CASE_: Any =OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
SCREAMING_SNAKE_CASE_: int =OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
SCREAMING_SNAKE_CASE_: str =OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
SCREAMING_SNAKE_CASE_: str =OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
SCREAMING_SNAKE_CASE_: Optional[int] =OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
SCREAMING_SNAKE_CASE_: Any =OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
SCREAMING_SNAKE_CASE_: Any =OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
SCREAMING_SNAKE_CASE_: Optional[Any] =OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
SCREAMING_SNAKE_CASE_: int =OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
SCREAMING_SNAKE_CASE_: Optional[int] =OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
SCREAMING_SNAKE_CASE_: str =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_: str =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_: List[str] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_: List[Any] =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Optional[Any] =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Dict =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_: Optional[Any] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_: Any =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: List[str] =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Dict =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Any =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Dict =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Dict =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_: Optional[Any] =_LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __A ( _BaseAutoModelClass ):
a__ : int = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_: Dict =auto_class_update(FlaxAutoModel)
class __A ( _BaseAutoModelClass ):
a__ : str = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_: Dict =auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class __A ( _BaseAutoModelClass ):
a__ : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_: Tuple =auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class __A ( _BaseAutoModelClass ):
a__ : Optional[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_: Optional[Any] =auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class __A ( _BaseAutoModelClass ):
a__ : List[Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_: Optional[Any] =auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class __A ( _BaseAutoModelClass ):
a__ : Union[str, Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_: Optional[int] =auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class __A ( _BaseAutoModelClass ):
a__ : Optional[int] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_: List[Any] =auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class __A ( _BaseAutoModelClass ):
a__ : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_: List[Any] =auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class __A ( _BaseAutoModelClass ):
a__ : Dict = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_: Any =auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class __A ( _BaseAutoModelClass ):
a__ : Union[str, Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_: int =auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class __A ( _BaseAutoModelClass ):
a__ : int = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_: Dict =auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class __A ( _BaseAutoModelClass ):
a__ : Any = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_: Optional[int] =auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class __A ( _BaseAutoModelClass ):
a__ : List[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_: Union[str, Any] =auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 106 | 1 |
def __UpperCamelCase ( _A , _A ):
lowerCAmelCase_ = len(_A )
lowerCAmelCase_ = len(_A )
lowerCAmelCase_ = (
first_str_length if first_str_length > second_str_length else second_str_length
)
lowerCAmelCase_ = []
for char_count in range(_A ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(_A )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 278 |
import argparse
from collections import defaultdict
import yaml
_A = '''docs/source/en/_toctree.yml'''
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = defaultdict(_A )
for doc in model_doc:
counts[doc["local"]] += 1
lowerCAmelCase_ = [key for key, value in counts.items() if value > 1]
lowerCAmelCase_ = []
for duplicate_key in duplicates:
lowerCAmelCase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(_A ) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at "
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(_A , key=lambda _A : s["title"].lower() )
def __UpperCamelCase ( _A=False ):
with open(_A , encoding='''utf-8''' ) as f:
lowerCAmelCase_ = yaml.safe_load(f.read() )
# Get to the API doc
lowerCAmelCase_ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCAmelCase_ = content[api_idx]['''sections''']
# Then to the model doc
lowerCAmelCase_ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowerCAmelCase_ = api_doc[model_idx]['''sections''']
lowerCAmelCase_ = [(idx, section) for idx, section in enumerate(_A ) if '''sections''' in section]
lowerCAmelCase_ = False
for idx, modality_doc in modalities_docs:
lowerCAmelCase_ = modality_doc['''sections''']
lowerCAmelCase_ = clean_model_doc_toc(_A )
if old_modality_doc != new_modality_doc:
lowerCAmelCase_ = True
if overwrite:
lowerCAmelCase_ = new_modality_doc
if diff:
if overwrite:
lowerCAmelCase_ = model_doc
lowerCAmelCase_ = api_doc
with open(_A , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(_A , allow_unicode=_A ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_A = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 278 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : List[str] = False ):
'''simple docstring'''
if not arr:
return 0
SCREAMING_SNAKE_CASE__ : Any =0 if allow_empty_subarrays else float('''-inf''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0.0
for num in arr:
SCREAMING_SNAKE_CASE__ : int =max(0 if allow_empty_subarrays else num, curr_sum + num )
SCREAMING_SNAKE_CASE__ : Optional[Any] =max(__snake_case, __snake_case )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
a_ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''') | 364 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 222 | 0 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def lowercase_ ( self , lowerCamelCase__=0 ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) )
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
__lowerCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs()
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = ort.SessionOptions()
__lowerCamelCase = False
return options
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((128, 128) )
# using the PNDM scheduler by default
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowerCamelCase = init_image.resize((128, 128) )
__lowerCamelCase = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
__lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 'A fantasy landscape, trending on artstation'
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='np' , )
__lowerCamelCase = output.images
__lowerCamelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 90 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = 42
class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
@register_to_config
def __init__( self : Optional[int], lowerCAmelCase : int = 32, lowerCAmelCase : int = 64, lowerCAmelCase : int = 20, lowerCAmelCase : int = 768, lowerCAmelCase : Optional[Any]=77, lowerCAmelCase : Tuple=4, lowerCAmelCase : float = 0.0, lowerCAmelCase : str = "silu", lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = "linear", lowerCAmelCase : Optional[str] = "prd", lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[int] = None, ) -> List[Any]:
super().__init__()
lowercase : List[Any] = num_attention_heads
lowercase : int = attention_head_dim
lowercase : List[Any] = num_attention_heads * attention_head_dim
lowercase : Tuple = additional_embeddings
lowercase : Dict = time_embed_dim or inner_dim
lowercase : Optional[Any] = embedding_proj_dim or embedding_dim
lowercase : int = clip_embed_dim or embedding_dim
lowercase : List[str] = Timesteps(lowerCAmelCase, lowerCAmelCase, 0 )
lowercase : List[str] = TimestepEmbedding(lowerCAmelCase, lowerCAmelCase, out_dim=lowerCAmelCase, act_fn=lowerCAmelCase )
lowercase : List[str] = nn.Linear(lowerCAmelCase, lowerCAmelCase )
if embedding_proj_norm_type is None:
lowercase : str = None
elif embedding_proj_norm_type == "layer":
lowercase : Tuple = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
lowercase : List[str] = nn.Linear(lowerCAmelCase, lowerCAmelCase )
if encoder_hid_proj_type is None:
lowercase : Optional[int] = None
elif encoder_hid_proj_type == "linear":
lowercase : Dict = nn.Linear(lowerCAmelCase, lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
lowercase : Dict = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, lowerCAmelCase ) )
if added_emb_type == "prd":
lowercase : Union[str, Any] = nn.Parameter(torch.zeros(1, 1, lowerCAmelCase ) )
elif added_emb_type is None:
lowercase : str = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
lowercase : Dict = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, dropout=lowerCAmelCase, activation_fn='gelu', attention_bias=lowerCAmelCase, )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
lowercase : str = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
lowercase : Optional[int] = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
lowercase : int = nn.LayerNorm(lowerCAmelCase )
lowercase : str = nn.Linear(lowerCAmelCase, lowerCAmelCase )
lowercase : Optional[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0000.0 )
causal_attention_mask.triu_(1 )
lowercase : List[str] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask', lowerCAmelCase, persistent=lowerCAmelCase )
lowercase : Any = nn.Parameter(torch.zeros(1, lowerCAmelCase ) )
lowercase : Any = nn.Parameter(torch.zeros(1, lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase ( self : Tuple ) -> Dict[str, AttentionProcessor]:
lowercase : Any = {}
def fn_recursive_add_processors(lowerCAmelCase : str, lowerCAmelCase : torch.nn.Module, lowerCAmelCase : Dict[str, AttentionProcessor] ):
if hasattr(lowerCAmelCase, 'set_processor' ):
lowercase : List[str] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''', lowerCAmelCase, lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
return processors
def lowercase ( self : Union[str, Any], lowerCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Tuple:
lowercase : str = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase : str, lowerCAmelCase : torch.nn.Module, lowerCAmelCase : Union[str, Any] ):
if hasattr(lowerCAmelCase, 'set_processor' ):
if not isinstance(lowerCAmelCase, lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''', lowerCAmelCase, lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
self.set_attn_processor(AttnProcessor() )
def lowercase ( self : Any, lowerCAmelCase : int, lowerCAmelCase : Union[torch.Tensor, float, int], lowerCAmelCase : torch.FloatTensor, lowerCAmelCase : Optional[torch.FloatTensor] = None, lowerCAmelCase : Optional[torch.BoolTensor] = None, lowerCAmelCase : bool = True, ) -> List[Any]:
lowercase : Optional[Any] = hidden_states.shape[0]
lowercase : Union[str, Any] = timestep
if not torch.is_tensor(lowerCAmelCase ):
lowercase : List[str] = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
lowercase : List[str] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase : Optional[int] = timesteps * torch.ones(lowerCAmelCase, dtype=timesteps.dtype, device=timesteps.device )
lowercase : Dict = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowercase : Optional[int] = timesteps_projected.to(dtype=self.dtype )
lowercase : Any = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
lowercase : Any = self.embedding_proj_norm(lowerCAmelCase )
lowercase : List[str] = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowercase : str = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
lowercase : Optional[Any] = self.proj_in(lowerCAmelCase )
lowercase : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
lowercase : Dict = []
lowercase : Optional[int] = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowercase : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowercase : Union[str, Any] = hidden_states[:, None, :]
lowercase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowercase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase, -1, -1 )
additional_embeds.append(lowerCAmelCase )
lowercase : Union[str, Any] = torch.cat(
lowerCAmelCase, dim=1, )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowercase : Optional[int] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowercase : List[Any] = F.pad(
lowerCAmelCase, (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
), value=0.0, )
lowercase : str = hidden_states + positional_embeddings
if attention_mask is not None:
lowercase : Tuple = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
lowercase : List[Any] = F.pad(lowerCAmelCase, (0, self.additional_embeddings), value=0.0 )
lowercase : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowercase : Union[str, Any] = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0 )
if self.norm_in is not None:
lowercase : List[Any] = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
lowercase : Tuple = block(lowerCAmelCase, attention_mask=lowerCAmelCase )
lowercase : Optional[Any] = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
lowercase : Optional[Any] = hidden_states[:, -1]
else:
lowercase : Any = hidden_states[:, additional_embeddings_len:]
lowercase : Optional[int] = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
def lowercase ( self : Any, lowerCAmelCase : Dict ) -> Dict:
lowercase : int = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 255 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : str = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
lowercase__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 289 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase__ : Optional[int] = 1_6
lowercase__ : List[str] = 3_2
def UpperCamelCase_ ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : int = 16 ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase_ : Union[str, Any] = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowerCAmelCase__ : Tuple ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ : Dict = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ : Dict = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ : int = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCAmelCase__ : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ : Dict = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ : str = 8
else:
lowerCAmelCase_ : str = None
return tokenizer.pad(
lowerCAmelCase__ , padding='longest' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCAmelCase_ : List[Any] = DataLoader(
tokenized_datasets['train'] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = DataLoader(
tokenized_datasets['validation'] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase__ : int = mocked_dataloaders # noqa: F811
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowerCAmelCase__ ) == "1":
lowerCAmelCase_ : Optional[int] = 2
# Initialize accelerator
lowerCAmelCase_ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ : Optional[int] = config['lr']
lowerCAmelCase_ : Tuple = int(config['num_epochs'] )
lowerCAmelCase_ : int = int(config['seed'] )
lowerCAmelCase_ : str = int(config['batch_size'] )
lowerCAmelCase_ : str = evaluate.load('glue' , 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowerCAmelCase__ )
def inner_training_loop(lowerCAmelCase__ : Optional[int] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowerCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ : str = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowerCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ : int = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ : List[Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )
lowerCAmelCase_ ,lowerCAmelCase_ : Any = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ )
# Instantiate scheduler
lowerCAmelCase_ : Dict = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : List[Any] = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Now we train the model
for epoch in range(lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase_ : List[str] = model(**lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = outputs.loss
accelerator.backward(lowerCAmelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ : Union[str, Any] = model(**lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
lowerCAmelCase_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , lowerCAmelCase__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def UpperCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase_ : int = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
lowerCAmelCase_ : str = parser.parse_args()
lowerCAmelCase_ : Union[str, Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 289 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.