"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
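# Aside (ours, not part of the original test file): the JIT check above relies
# on jax.jit tracing the function once and preserving its output structure, so
# the jitted and eager calls must agree shape-for-shape. In miniature:
#
#     fn = lambda x: jnp.tanh(x) * 2.0
#     x = jnp.ones((2, 3))
#     assert jax.jit(fn)(x).shape == fn(x).shape == (2, 3)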
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and tombstone deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
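# Usage sketch (ours): exercising the map above end to end.
if __name__ == "__main__":
    hm = HashMap(initial_block_size=4)
    hm["a"] = 1
    hm["b"] = 2
    hm["a"] = 3  # probing finds the existing key and overwrites in place
    assert hm["a"] == 3 and len(hm) == 2
    del hm["b"]  # replaced by the _deleted tombstone so probe chains stay intact
    assert "b" not in hm  # MutableMapping derives __contains__ from __getitem__
    print(hm)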
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the loop won't run 32
        # times, it only runs once per `1` bit
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
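# Example (ours): 0b101101 has four set bits, so `number &= number - 1`
# executes exactly four times -- once per 1-bit, not once per bit position.
assert get_set_bits_count(0b101101) == 4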
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self : List[str] , __a : str , __a : int = 13 , __a : int = 64 , __a : int = 2 , __a : int = 3 , __a : int = 3 , __a : bool = True , __a : bool = True , __a : int = 1_28 , __a : List[str]=[16, 32, 64, 1_28] , __a : int = 7 , __a : int = 4 , __a : int = 37 , __a : str = "gelu" , __a : float = 0.1 , __a : float = 0.1 , __a : int = 10 , __a : float = 0.02 , __a : int = 2 , __a : int = 1 , __a : int = 1_28 , __a : List[int] = [2, 2, 2, 2] , __a : int = 2 , __a : int = 2 , ):
_a = parent
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = type_sequence_label_size
_a = initializer_range
_a = encoder_stride
_a = num_attention_outputs
_a = embed_dim
_a = embed_dim + 1
_a = resolution
_a = depths
_a = hidden_sizes
_a = dim
_a = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
def UpperCamelCase__ ( self : str ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def UpperCamelCase__ ( self : Any , __a : str , __a : Tuple , __a : Optional[int] ):
_a = TFEfficientFormerModel(config=__a )
_a = model(__a , training=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Tuple ):
_a = self.type_sequence_label_size
_a = TFEfficientFormerForImageClassification(__a )
_a = model(__a , labels=__a , training=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a = 1
_a = TFEfficientFormerForImageClassification(__a )
_a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__a =(
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__a =(
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__a =False
__a =False
__a =False
__a =False
__a =False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)
def UpperCamelCase__ ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def UpperCamelCase__ ( self : List[str] ):
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def UpperCamelCase__ ( self : str ):
pass
def UpperCamelCase__ ( self : Optional[Any] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__a )
_a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ ( self : Optional[Any] ):
def check_hidden_states_output(__a : int , __a : Union[str, Any] , __a : Optional[Any] ):
_a = model_class(__a )
_a = model(**self._prepare_for_class(__a , __a ) , training=__a )
_a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
if hasattr(self.model_tester , "encoder_seq_length" ):
_a = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
_a = seq_length * self.model_tester.chunk_length
else:
_a = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_a = outputs.decoder_hidden_states
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , __a )
_a = getattr(self.model_tester , "seq_length" , __a )
_a = getattr(self.model_tester , "decoder_seq_length" , __a )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(__a , __a , __a )
def UpperCamelCase__ ( self : Any , __a : List[Any] , __a : Dict , __a : List[str]=False ):
_a = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self : Dict ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def UpperCamelCase__ ( self : str ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = TFEfficientFormerModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def UpperCamelCase__ ( self : List[str] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = getattr(self.model_tester , "seq_length" , __a )
_a = getattr(self.model_tester , "encoder_seq_length" , __a )
_a = getattr(self.model_tester , "key_length" , __a )
_a = getattr(self.model_tester , "chunk_length" , __a )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
_a = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(__a )
_a = model(**self._prepare_for_class(__a , __a ) , training=__a )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(__a )
_a = model(**self._prepare_for_class(__a , __a ) , training=__a )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def UpperCamelCase__ ( self : Tuple ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_a = model_class(__a )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_a = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_a = model(__a )
self.assertTrue(outputs_dict is not None )
def _lowerCamelCase ( ) -> str:
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self : Optional[Any] ):
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self : Tuple ):
_a = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__a , return_tensors="tf" )
# forward pass
_a = model(**__a , training=__a )
# verify the logits
_a = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __a )
_a = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
_a = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__a , return_tensors="tf" )
# forward pass
_a = model(**__a , training=__a )
# verify the logits
_a = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __a )
_a = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )
    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
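# Example invocation (ours; the script and file names are placeholders):
#   python conversion_ldm_uncond.py --checkpoint_path model.ckpt \
#       --config_path ldm_config.yaml --output_path ./ldm-pipeline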
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
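# Usage sketch (ours): the two derived properties follow from the defaults.
#   config = FalconConfig()
#   assert config.head_dim == 4544 // 71  # 64
#   assert config.rotary                  # rotary embeddings unless alibi is set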
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, project_dim=768, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(checkpoint_path, pytorch_dump_folder_path)
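# The remap above in isolation (ours): the tied decoder weight is re-keyed so
# transformers' GPT-2 loading code finds it under the name it expects.
#   d = {OLD_KEY: torch.zeros(1)}
#   d[NEW_KEY] = d.pop(OLD_KEY)
#   assert list(d) == [NEW_KEY]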
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
snake_case_ :str = name.replace("""pretrained.model""", """dpt.encoder""" )
if "pretrained.model" in name:
snake_case_ :Optional[Any] = name.replace("""pretrained.model""", """dpt.embeddings""" )
if "patch_embed" in name:
snake_case_ :List[str] = name.replace("""patch_embed""", """""" )
if "pos_embed" in name:
snake_case_ :int = name.replace("""pos_embed""", """position_embeddings""" )
if "attn.proj" in name:
snake_case_ :Union[str, Any] = name.replace("""attn.proj""", """attention.output.dense""" )
if "proj" in name and "project" not in name:
snake_case_ :str = name.replace("""proj""", """projection""" )
if "blocks" in name:
snake_case_ :Dict = name.replace("""blocks""", """layer""" )
if "mlp.fc1" in name:
snake_case_ :int = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case_ :int = name.replace("""mlp.fc2""", """output.dense""" )
if "norm1" in name and "backbone" not in name:
snake_case_ :Optional[int] = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
snake_case_ :str = name.replace("""norm2""", """layernorm_after""" )
if "scratch.output_conv" in name:
snake_case_ :List[str] = name.replace("""scratch.output_conv""", """head""" )
if "scratch" in name:
snake_case_ :int = name.replace("""scratch""", """neck""" )
if "layer1_rn" in name:
snake_case_ :Tuple = name.replace("""layer1_rn""", """convs.0""" )
if "layer2_rn" in name:
snake_case_ :List[str] = name.replace("""layer2_rn""", """convs.1""" )
if "layer3_rn" in name:
snake_case_ :Tuple = name.replace("""layer3_rn""", """convs.2""" )
if "layer4_rn" in name:
snake_case_ :Optional[int] = name.replace("""layer4_rn""", """convs.3""" )
if "refinenet" in name:
snake_case_ :Union[str, Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
snake_case_ :Optional[Any] = name.replace(f"""refinenet{layer_idx}""", f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
snake_case_ :str = name.replace("""out_conv""", """projection""" )
if "resConfUnit1" in name:
snake_case_ :Union[str, Any] = name.replace("""resConfUnit1""", """residual_layer1""" )
if "resConfUnit2" in name:
snake_case_ :int = name.replace("""resConfUnit2""", """residual_layer2""" )
if "conv1" in name:
snake_case_ :int = name.replace("""conv1""", """convolution1""" )
if "conv2" in name:
snake_case_ :str = name.replace("""conv2""", """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
snake_case_ :Optional[Any] = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
snake_case_ :List[str] = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
snake_case_ :Optional[int] = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
snake_case_ :int = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
snake_case_ :Optional[Any] = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
snake_case_ :Optional[Any] = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
snake_case_ :int = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
snake_case_ :Optional[int] = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
snake_case_ :List[str] = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
snake_case_ :Tuple = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
snake_case_ :str = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
snake_case_ :List[str] = name.replace("""pretrained""", """dpt""" )
if "bn" in name:
snake_case_ :Optional[int] = name.replace("""bn""", """batch_norm""" )
if "head" in name:
snake_case_ :Dict = name.replace("""head""", """head.head""" )
if "encoder.norm" in name:
snake_case_ :Optional[int] = name.replace("""encoder.norm""", """layernorm""" )
if "auxlayer" in name:
snake_case_ :List[str] = name.replace("""auxlayer""", """auxiliary_head.head""" )
if "backbone" in name:
snake_case_ :List[str] = name.replace("""backbone""", """backbone.bit.encoder""" )
if ".." in name:
snake_case_ :str = name.replace("""..""", """.""" )
if "stem.conv" in name:
snake_case_ :Optional[Any] = name.replace("""stem.conv""", """bit.embedder.convolution""" )
if "blocks" in name:
snake_case_ :int = name.replace("""blocks""", """layers""" )
if "convolution" in name and "backbone" in name:
snake_case_ :Any = name.replace("""convolution""", """conv""" )
if "layer" in name and "backbone" in name:
snake_case_ :Optional[int] = name.replace("""layer""", """layers""" )
if "backbone.bit.encoder.bit" in name:
snake_case_ :Any = name.replace("""backbone.bit.encoder.bit""", """backbone.bit""" )
if "embedder.conv" in name:
snake_case_ :List[Any] = name.replace("""embedder.conv""", """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
snake_case_ :Any = name.replace("""backbone.bit.encoder.stem.norm""", """backbone.bit.embedder.norm""" )
return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
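# Aside (ours): the slicing above cuts a fused (3*hidden, hidden) qkv matrix
# into equal thirds -- query rows first, then key, then value:
#   hidden = 4
#   qkv = torch.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
#   q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
#   assert torch.equal(torch.cat([q, k, v]), qkv)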
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        # grow a subgraph from the smallest vertex, always adding the cheapest
        # edge that crosses from the subgraph to the rest of the graph
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(filepath) as f:
        data: list[str] = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
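# Worked example (ours): a triangle graph where Prim must drop the heaviest edge.
_g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
assert sum(_g.prims_algorithm().edges.values()) == 3  # keeps 0-1 and 1-2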
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=5_1_2,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
def parse_bool(string):
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
args = parser.parse_args()
controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 67 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    """Configuration for the MGP-STR scene-text-recognition model."""

    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27,
                 num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768,
                 num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False,
                 layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0,
                 output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
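
# Usage sketch (illustrative, not part of the original transformers file): the defaults
# above reproduce the `alibaba-damo/mgp-str-base` architecture, so a plain instantiation works.
#
#     config = MgpstrConfig()
#     print(config.model_type)        # "mgp-str"
#     print(config.max_token_length)  # 27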
| 263 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory() -> None:
    """Simulate the CUDA OOM error that `find_executable_batch_size` retries on."""
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
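
# Typical real-world use of the utility under test (illustrative sketch, not part of the
# original test file): `train` is a hypothetical training function that gets retried with
# a halved batch size whenever a CUDA OOM error escapes it.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders with `batch_size` and run the training loop
#
#     train()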
| 68 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained('RUCAIBox/mvp')

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp')
    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors='pt')
            # check if input_ids are returned and no labels
            self.assertIn('input_ids', batch)
            self.assertIn('attention_mask', batch)
            self.assertNotIn('labels', batch)
            self.assertNotIn('decoder_attention_mask', batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding='max_length', return_tensors='pt')
            self.assertEqual(32, targets['input_ids'].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1024, 'I am a small frog'], padding=True, truncation=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors='pt')
            input_ids = inputs['input_ids']
            labels = inputs['labels']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
| 69 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=prompt_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt', image=prompt_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 263 | 0 |
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
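
# Round-trip sanity check (illustrative, not part of the original file):
#
#     assert base16_encode(b"Hello") == "48656C6C6F"
#     assert base16_decode("48656C6C6F") == b"Hello"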
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False,
            set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu',
            projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16)
        pipe.to('cuda')
        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator,
            num_inference_steps=5, max_iter_to_alter=5, output_type='numpy').images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
        assert np.abs((expected_image - image).max()) < 5e-1
| 263 | 0 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->',
        end_prompt='<!--End of the generated tip-->',
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ' to fix this.')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 71 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1]:]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained('distilgpt2')
        model = AutoModelForCausalLM.from_pretrained('distilgpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors='pt')
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ''
            for new_text in streamer:
                streamer_text += new_text
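
# Typical real-world use of the streamers under test (illustrative sketch, not part of the
# original test file), assuming a loaded `model`/`tokenizer` and prepared `input_ids`:
#
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     thread = Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer})
#     thread.start()
#     for new_text in streamer:
#         print(new_text, end="")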
| 263 | 0 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar('T')
class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph; pass `directed=False` to build an undirected graph."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> 'GraphAdjacencyList[T]':
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(source_vertex)
self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
self.adj_list[source_vertex] = [destination_vertex]
self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
self.adj_list[source_vertex] = [destination_vertex]
self.adj_list[destination_vertex] = []
return self
def __repr__(self) -> str:
    return pformat(self.adj_list)
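
# Usage sketch (illustrative, not part of the original file): build a small undirected
# graph and print its adjacency list. `add_edge` returns `self`, so calls can be chained.
#
#     graph = GraphAdjacencyList[int](directed=False)
#     graph.add_edge(1, 2).add_edge(2, 3)
#     print(graph)  # {1: [2], 2: [1, 3], 3: [2]}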
| 72 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError('math domain error')
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
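
# Quick numerical check (illustrative, not part of the original file): for positive
# integers, Gamma(n) = (n - 1)!, so gamma(5) should be ~24 up to quadrature error.
#
#     print(gamma(5))  # ~24.0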
if __name__ == "__main__":
from doctest import testmod
testmod()
| 263 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16,
                 decoder_ffn_dim=4096, activation_function='gelu', max_position_embeddings=512, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02,
                 decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True,
                 layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs)
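
# Usage sketch (illustrative, not part of the original transformers file): `hidden_size`
# is resolved through `attribute_map`, so it reads back the decoder's `d_model`.
#
#     config = TrOCRConfig()
#     print(config.hidden_size)  # 1024, i.e. config.d_model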
| 73 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
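
# Worked example (illustrative, not part of the original file): 25 = 0b11001 and
# 32 = 0b100000, so the zero-padded columns OR to 0b111001 (= 57).
#
#     print(binary_or(25, 32))  # 0b111001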
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = 'lower newer'
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,*A_ : Any ,**A_ : Dict ) -> Any:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any]=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ )
# Simple input
A = 'This is a simple input'
A = ['This is a simple input 1', 'This is a simple input 2']
A = ('This is a simple input', 'This is a pair')
A = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(A_ ,tokenizer_r.encode ,A_ ,max_length=A_ ,padding='max_length' )
# Simple input
self.assertRaises(A_ ,tokenizer_r.encode_plus ,A_ ,max_length=A_ ,padding='max_length' )
# Simple input
self.assertRaises(
A_ ,tokenizer_r.batch_encode_plus ,A_ ,max_length=A_ ,padding='max_length' ,)
# Pair input
self.assertRaises(A_ ,tokenizer_r.encode ,A_ ,max_length=A_ ,padding='max_length' )
# Pair input
self.assertRaises(A_ ,tokenizer_r.encode_plus ,A_ ,max_length=A_ ,padding='max_length' )
# Pair input
self.assertRaises(
A_ ,tokenizer_r.batch_encode_plus ,A_ ,max_length=A_ ,padding='max_length' ,)
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>')
# Simple input
A = 'This is a simple input'
A = ['This is a simple input looooooooong', 'This is a simple input']
A = ('This is a simple input', 'This is a pair')
A = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
A = tokenizer.pad_token_id
A = tokenizer(A_ ,padding='max_length' ,max_length=30 ,return_tensors='np' )
A = tokenizer(A_ ,padding=A_ ,truncate=A_ ,return_tensors='np' )
A = tokenizer(*A_ ,padding='max_length' ,max_length=60 ,return_tensors='np' )
A = tokenizer(A_ ,padding=A_ ,truncate=A_ ,return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
        bos_token = '$$$'
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
A = 'This is a simple input'
A = ['This is a simple input 1', 'This is a simple input 2']
A = tokenizer.bos_token_id
A = tokenizer(A_ )
A = tokenizer(A_ )
self.assertEqual(out_s.input_ids[0] ,A_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A = tokenizer.decode(out_s.input_ids )
A = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,A_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
A = [self.get_tokenizer(do_lower_case=A_ ,add_bos_token=A_ )]
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A = 'Encode this.'
A = 'This one too please.'
A = tokenizer.encode(A_ ,add_special_tokens=A_ )
encoded_sequence += tokenizer.encode(A_ ,add_special_tokens=A_ )
A = tokenizer.encode_plus(
A_ ,A_ ,add_special_tokens=A_ ,return_special_tokens_mask=A_ ,)
A = encoded_sequence_dict['input_ids']
A = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(A_ ) ,len(A_ ) )
A = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ )
]
A = [x for x in filtered_sequence if x is not None]
self.assertEqual(A_ ,A_ )
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
A = AutoTokenizer.from_pretrained('facebook/opt-350m' ,from_slow=A_ )
A = 'A photo of a cat'
A = tokenizer.encode(
A_ ,)
self.assertEqual(A_ ,[2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
A = AutoTokenizer.from_pretrained('./test_opt' )
A = tokenizer.encode(
A_ ,)
self.assertEqual(A_ ,[2, 250, 1345, 9, 10, 4758] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = AutoTokenizer.from_pretrained('facebook/opt-350m' ,use_slow=A_ )
A = 'A photo of a cat'
A = tokenizer.encode(
A_ ,)
# Same as above
self.assertEqual(A_ ,[2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = AutoTokenizer.from_pretrained('facebook/opt-350m' ,from_slow=A_ )
A = 'bos'
A = tokenizer.get_vocab()['bos']
A = 'A photo of a cat'
A = tokenizer.encode(
A_ ,)
# We changed the bos token
self.assertEqual(A_ ,[3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
A = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
A = tokenizer.encode(
A_ ,)
self.assertEqual(A_ ,[3_1957, 250, 1345, 9, 10, 4758] ) | 74 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True,
                 size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True,
                 rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        # Compute the height and width the image processor is expected to produce.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
    def test_batch_feature(self):
        pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, Image.Image )
# Test not batched input
lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase )
lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def lowercase__ ( self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True )
for image in image_inputs:
    self.assertIsInstance(image, np.ndarray )
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
self.assertEqual(
    encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True )
self.assertEqual(
    encoded_images.shape, (
        self.image_processor_tester.batch_size,
        self.image_processor_tester.num_channels,
        expected_height,
        expected_width,
    ), )
def lowercase__ ( self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True )
for image in image_inputs:
    self.assertIsInstance(image, torch.Tensor )
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
self.assertEqual(
    encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True )
self.assertEqual(
    encoded_images.shape, (
        self.image_processor_tester.batch_size,
        self.image_processor_tester.num_channels,
        expected_height,
        expected_width,
    ), )
@slow
def lowercase__ ( self ):
"""simple docstring"""
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''', '''r''' ) as f:
    target = json.loads(f.read() )
target = {'''image_id''': 39_769, '''annotations''': target}
# encode them
image_processing = DetaImageProcessor()
encoding = image_processing(images=image, annotations=target, return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ =torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], lowerCAmelCase, atol=1e-4 ) )
# verify area
lowerCamelCase_ =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], lowerCAmelCase ) )
# verify boxes
lowerCamelCase_ =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], lowerCAmelCase, atol=1e-3 ) )
# verify image_id
lowerCamelCase_ =torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], lowerCAmelCase ) )
# verify is_crowd
lowerCamelCase_ =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], lowerCAmelCase ) )
# verify class_labels
lowerCamelCase_ =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], lowerCAmelCase ) )
# verify orig_size
lowerCamelCase_ =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], lowerCAmelCase ) )
# verify size
lowerCamelCase_ =torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], lowerCAmelCase ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''' ) as f:
    target = json.loads(f.read() )
target = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
masks_path = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
image_processing = DetaImageProcessor(format='''coco_panoptic''' )
encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ =torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], lowerCAmelCase, atol=1e-4 ) )
# verify area
lowerCamelCase_ =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], lowerCAmelCase ) )
# verify boxes
lowerCamelCase_ =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], lowerCAmelCase, atol=1e-3 ) )
# verify image_id
lowerCamelCase_ =torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], lowerCAmelCase ) )
# verify is_crowd
lowerCamelCase_ =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], lowerCAmelCase ) )
# verify class_labels
lowerCamelCase_ =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], lowerCAmelCase ) )
# verify masks
lowerCamelCase_ =822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), lowerCAmelCase )
# verify orig_size
lowerCamelCase_ =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], lowerCAmelCase ) )
# verify size
lowerCamelCase_ =torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], lowerCAmelCase ) )
| 75 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase :List[str] = '▁'
_lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCAmelCase :List[Any] = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
_lowerCAmelCase :Tuple = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(vocab_file ) )
self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.fairseq_offset = 1
self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + self.fairseq_offset
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Union[str, Any]:
state = self.__dict__.copy()
state['''sp_model'''] = None
state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , d ) -> None:
    self.__dict__ = d
    # for backward compatibility
    if not hasattr(self , '''sp_model_kwargs''' ):
        self.sp_model_kwargs = {}
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Tuple:
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , A ) -> int:
out_string = ''''''.join(A ).replace('''▁''' , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
    if not os.path.isdir(save_directory ):
        logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
        return
    out_vocab_file = os.path.join(
        save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
        copyfile(self.vocab_file , out_vocab_file )
    elif not os.path.isfile(self.vocab_file ):
        with open(out_vocab_file , '''wb''' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
return (out_vocab_file,)
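# Illustration (an added sketch, not part of the original module) of the fairseq/spm
# alignment documented in __init__ above: the first four ids are pinned explicitly
# and every other sentencepiece id is shifted up by ``fairseq_offset``.
if __name__ == "__main__":
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    fairseq_offset = 1
    spm_id_of_comma = 3  # "," is id 3 in the raw sentencepiece vocab...
    assert spm_id_of_comma + fairseq_offset == 4  # ...and id 4 in the fairseq vocab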
| 263 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =['vqvae']
def __init__( self : Dict , a : AutoencoderKL , a : UNetaDConditionModel , a : Mel , a : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=a , scheduler=a , mel=a , vqvae=a )
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , a ) else 1000
@torch.no_grad()
def __call__( self : str , a : int = 1 , a : str = None , a : np.ndarray = None , a : int = 0 , a : int = 0 , a : int = None , a : torch.Generator = None , a : float = 0 , a : float = 0 , a : torch.Generator = None , a : float = 0 , a : torch.Tensor = None , a : torch.Tensor = None , a : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = steps or self.get_default_steps()
self.scheduler.set_timesteps(a )
SCREAMING_SNAKE_CASE : str = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE : Union[str, Any] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE : Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=a , device=self.device , )
SCREAMING_SNAKE_CASE : List[Any] = noise
SCREAMING_SNAKE_CASE : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(a , a )
SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(a )
SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE : Union[str, Any] = (input_image / 255) * 2 - 1
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE : Dict = self.vqvae.encode(torch.unsqueeze(a , 0 ) ).latent_dist.sample(
generator=a )[0]
SCREAMING_SNAKE_CASE : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.add_noise(a , a , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE : int = int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE : int = int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(a , a , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , a ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.unet(a , a , a )["sample"]
else:
SCREAMING_SNAKE_CASE : List[Any] = self.unet(a , a )["sample"]
if isinstance(self.scheduler , a ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.step(
model_output=a , timestep=a , sample=a , eta=a , generator=a , )["prev_sample"]
else:
SCREAMING_SNAKE_CASE : Dict = self.scheduler.step(
model_output=a , timestep=a , sample=a , generator=a , )["prev_sample"]
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE : Optional[int] = mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE : int = 1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE : List[str] = self.vqvae.decode(a )["sample"]
SCREAMING_SNAKE_CASE : Tuple = (images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE : Optional[Any] = (images * 255).round().astype("uint8" )
SCREAMING_SNAKE_CASE : Union[str, Any] = list(
    (Image.fromarray(_[:, :, 0] ) for _ in images)
    if images.shape[3] == 1
    else (Image.fromarray(_ , mode="RGB" ).convert("L" ) for _ in images) )
audios = [self.mel.image_to_audio(_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(a )[:, np.newaxis, :] ) , **ImagePipelineOutput(a ) )
@torch.no_grad()
def __UpperCamelCase ( self : List[str] , a : List[Image.Image] , a : int = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , a )
self.scheduler.set_timesteps(a )
SCREAMING_SNAKE_CASE : str = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE : Optional[Any] = (sample / 255) * 2 - 1
SCREAMING_SNAKE_CASE : Dict = torch.Tensor(a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE : int = self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE : Any = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE : Dict = self.unet(a , a )["sample"]
SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE : List[str] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __UpperCamelCase ( xa : torch.Tensor , xb : torch.Tensor , alpha : float ) -> torch.Tensor:
    """Spherical linear interpolation between two tensors, weighted by alpha."""
    theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
    return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta ) | 76 |
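# A self-contained sketch (not from the original pipeline) of the spherical linear
# interpolation computed by the static method above: two orthogonal unit vectors
# mixed at alpha=0.5 each get weight sin(pi/4) / sin(pi/2) ~= 0.7071.
import torch
from math import acos, sin

xa = torch.tensor([1.0, 0.0])
xb = torch.tensor([0.0, 1.0])
alpha = 0.5
theta = acos(float(torch.dot(xa, xb) / torch.norm(xa) / torch.norm(xb)))
mixed = sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)
assert torch.allclose(mixed, torch.tensor([0.7071, 0.7071]), atol=1e-3)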
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , *A , **A ) -> None:
warnings.warn(
    '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
    ''' use DonutImageProcessor instead.''' , FutureWarning , )
super().__init__(*A , **A )
| 263 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Tuple = ["image_processor", "tokenizer"]
lowerCamelCase__ : Tuple = "ChineseCLIPImageProcessor"
lowerCamelCase__ : List[Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , a=None , a=None , **a ) -> List[str]:
    feature_extractor = None
    if "feature_extractor" in kwargs:
        warnings.warn(
            'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
            ' instead.' , FutureWarning , )
        feature_extractor = kwargs.pop('feature_extractor' )
    image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a , a )
self.current_processor = self.image_processor
def __call__( self , a=None , a=None , a=None , **a ) -> Optional[Any]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
    encoding = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
    image_features = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
    encoding['pixel_values'] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def _UpperCAmelCase ( self , *a , **a ) -> Dict:
return self.tokenizer.batch_decode(*a , **a )
def _UpperCAmelCase ( self , *a , **a ) -> List[Any]:
return self.tokenizer.decode(*a , **a )
@property
def _UpperCAmelCase ( self ) -> Tuple:
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCAmelCase ( self ) -> str:
warnings.warn(
    '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
return self.image_processor_class
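# Quick illustration (not in the original file) of the dict.fromkeys trick used by
# model_input_names above: it merges the two name lists in order while dropping
# duplicates, unlike a set, which would lose the ordering.
if __name__ == "__main__":
    tokenizer_names = ["input_ids", "attention_mask"]
    image_processor_names = ["pixel_values", "attention_mask"]
    merged = list(dict.fromkeys(tokenizer_names + image_processor_names))
    assert merged == ["input_ids", "attention_mask", "pixel_values"]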
| 77 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
# Load configuration defined in the metadata file
with open(UpperCamelCase__ ) as metadata_file:
    metadata = json.load(metadata_file )
config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
state_dict = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# Load the entity vocab file
entity_vocab = load_entity_vocab(UpperCamelCase__ )
tokenizer = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
entity_token_one = AddedToken('''<ent>''' , lstrip=False , rstrip=False )
entity_token_two = AddedToken('''<ent2>''' , lstrip=False , rstrip=False )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_one, entity_token_two]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
    json.dump(entity_vocab , f )
tokenizer = LukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
word_emb = state_dict['''embeddings.word_embeddings.weight''']
ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
enta_emb = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
prefix = F'encoder.layer.{layer_index}.attention.self.'
state_dict[prefix + '''w2e_''' + matrix_name] = state_dict[prefix + matrix_name]
state_dict[prefix + '''e2w_''' + matrix_name] = state_dict[prefix + matrix_name]
state_dict[prefix + '''e2e_''' + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
entity_emb[entity_vocab['''[MASK2]''']] = entity_emb[entity_vocab['''[MASK]''']]
model = LukeModel(config=config ).eval()
missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
    raise ValueError(F'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
tokenizer = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
text = (
    '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
    ''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
span = (39, 42)
encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='''pt''' )
outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
    expected_shape = torch.Size((1, 42, 1024) )
    expected_slice = torch.tensor(
        [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
    expected_shape = torch.Size((1, 42, 768) )
    expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
    raise ValueError(
        F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
    raise ValueError
# Verify entity hidden states
if model_size == "large":
    expected_shape = torch.Size((1, 1, 1024) )
    expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
    expected_shape = torch.Size((1, 1, 768) )
    expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
    raise ValueError(
        F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
        F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
    raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def load_entity_vocab (UpperCamelCase__ : Union[str, Any] ):
    entity_vocab = {}
    with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split('''\t''' )
            entity_vocab[title] = index
return entity_vocab
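# Self-check (synthetic rows, not a real LUKE vocab) of the TSV layout parsed by
# load_entity_vocab above: each line is "<title>\t<count>" and the zero-based
# line index becomes the entity id.
if __name__ == "__main__":
    import io
    fake_vocab = io.StringIO("[PAD]\t0\n[UNK]\t0\n[MASK]\t0\n")
    parsed = {line.rstrip().split("\t")[0]: i for i, line in enumerate(fake_vocab)}
    assert parsed == {"[PAD]": 0, "[UNK]": 1, "[MASK]": 2}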
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowerCAmelCase :Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 263 | 0 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_deterministic( ):
    no_aggregation = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=['rouge2', 'rougeL'] )
    assert isinstance(no_aggregation , defaultdict )
    no_aggregation_just_ra = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=['rouge2'] )
    assert (
        pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
    )
def test_newline_cnn_improvement( ):
    k = 'rougeLsum'
    score = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics( ):
    k = ['rouge1', 'rouge2', 'rougeL']
    score_sep = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=k )
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=k )
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep( ):
    pred = [
        'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        'Margot Frank, died in 1945, a month earlier than previously thought.',
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        ' the final seconds on board Flight 9525.',
    ]
    assert calculate_rouge(pred , tgt , newline_sep=True ) == calculate_rouge(pred , tgt , newline_sep=False )
def test_pegasus_newline( ):
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred , tgt , rouge_keys=['rougeLsum'] , newline_sep=False )['rougeLsum']
    new_score = calculate_rouge(pred , tgt , rouge_keys=['rougeLsum'] )['rougeLsum']
    assert new_score > prev_score
def test_rouge_cli( ):
    data_dir = Path('examples/seq2seq/test_data/wmt_en_ro' )
    metrics = calculate_rouge_path(data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) )
    assert isinstance(metrics , dict )
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) , bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict , defaultdict )
| 78 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match (qs , ks ):
    """Return True if the regexes in qs fully match some contiguous window of ks."""
    qts = tuple((re.compile(x + '''$''' ) for x in qs) )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules (rules ):
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules ():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''' , None )),
        (("transformer", "wte", "embedding"), P('''mp''' , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , '''mp''' )),
        (("attention", "out_proj", "kernel"), P('''mp''' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , '''mp''' )),
        (("mlp", "c_fc", "bias"), P('''mp''' )),
        (("mlp", "c_proj", "kernel"), P('''mp''' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def lowerCamelCase_ (in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
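# A minimal demonstration (keys are illustrative, not from a real checkpoint) of
# the window matcher above: the qs regexes must fully match some contiguous
# window of the parameter key tuple ks.
if __name__ == "__main__":
    assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
    assert not _match(("mlp", "c_fc", "kernel"), ("transformer", "wte", "embedding"))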
| 263 | 0 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime ( arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    '''simple docstring'''
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Copy the burst time into remaining_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime ( burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
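# A quick sanity check (extra values, not part of the original demo below): the
# short job arriving at t=1 must wait for the longer job that already started,
# so it waits 4 - 1 - 1 = 2 ticks while the first job waits 0.
assert calculate_waitingtime([0, 1], [3, 1], 2) == [0, 2]
assert calculate_turnaroundtime([3, 1], 2, [0, 2]) == [3, 3]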
if __name__ == "__main__":
print('''[TEST CASE 01]''')
lowerCamelCase_ = 4
lowerCamelCase_ = [2, 5, 3, 7]
lowerCamelCase_ = [0, 0, 0, 0]
lowerCamelCase_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 79 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
audio_classifier = pipeline(
    task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
dataset = load_dataset('''ashraq/esc50''' )
audio = dataset['''train''']['''audio'''][-1]['''array''']
output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
    nested_simplify(output ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
audio_classifier = pipeline(
    task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
dataset = load_dataset('''ashraq/esc50''' )
audio = dataset['''train''']['''audio'''][-1]['''array''']
output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
    nested_simplify(output ) , [
        {'''score''': 0.999, '''label''': '''Sound of a dog'''},
        {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
    ] , )
output = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
    nested_simplify(output ) , [
        [
            {'''score''': 0.999, '''label''': '''Sound of a dog'''},
            {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
        ],
    ]
    * 5 , )
output = audio_classifier(
    [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
self.assertEqual(
    nested_simplify(output ) , [
        [
            {'''score''': 0.999, '''label''': '''Sound of a dog'''},
            {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
        ],
    ]
    * 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
| 263 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def _UpperCamelCase ( resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
    '''simple docstring'''
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
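# Worked example (an extra check, not in the doctest run below): a 3-4-5 triangle,
# since |Z| = sqrt(R**2 + X**2), so resistance 3 and reactance 4 give impedance 5.
assert _UpperCamelCase(3, 4, 0) == {"impedance": 5.0}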
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def dummy_dataloaders (a=2 , b=3 , batch_size=16 , n_train_batches : int = 10 , n_valid_batches : int = 2 ):
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train (num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() ) # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel ( nn.Module ):
    '''simple docstring'''
    def __init__( self ) -> None:
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
    def forward ( self , x ) -> torch.Tensor:
        return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
savedir = '/tmp/accelerate/state_checkpointing'
model = DummyModel()
optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
train_dataloader, valid_dataloader = dummy_dataloaders()
project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
    model, optimizer, train_dataloader, valid_dataloader, scheduler
)
model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
param_device = group['params'][0].device
break
assert param_device.type == accelerator.device.type
model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
param_device = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
param_device = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 263 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length ( fnc : Callable[[float], float] , x_start : float , x_end : float , steps : int = 1_00 , ):
    """simple docstring"""
    xa = x_start
    fxa = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb )
        length += math.hypot(xb - xa , fxb - fxa )
        # Increment step
        xa = xb
        fxa = fxb
    return length
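# Sanity check (in addition to the sine demo below): for the straight line
# y = x from 0 to 1 the polyline approximation is exact, giving length sqrt(2).
assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-9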
if __name__ == "__main__":
    def f ( x ):
        """simple docstring"""
        return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
lowerCamelCase_ : Tuple = 1_0
while i <= 1_0_0_0_0_0:
print(F'With {i} steps: {line_length(f, -1_0, 1_0, i)}')
i *= 1_0 | 81 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :str = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __lowerCAmelCase :
def __init__( self , _snake_case , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = 13
_lowerCAmelCase = 7
_lowerCAmelCase = 30
_lowerCAmelCase = self.seq_length + self.mem_len
_lowerCAmelCase = 15
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = 99
_lowerCAmelCase = [10, 50, 80]
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 128
_lowerCAmelCase = 2
_lowerCAmelCase = 2
_lowerCAmelCase = None
_lowerCAmelCase = 1
_lowerCAmelCase = 0
_lowerCAmelCase = 3
_lowerCAmelCase = self.vocab_size - 1
_lowerCAmelCase = 0.01
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def snake_case ( self ):
"""simple docstring"""
random.seed(self.seed )
tf.random.set_seed(self.seed )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFTransfoXLModel(_snake_case )
_lowerCAmelCase , _lowerCAmelCase = model(_snake_case ).to_tuple()
_lowerCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase = model(_snake_case ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
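    # Note: each layer's returned mems tensor has shape (mem_len, batch, hidden);
    # feeding it back as `mems` on the next call (as in the dict input above)
    # lets the model attend over cached hidden states from the previous segment.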
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFTransfoXLLMHeadModel(_snake_case )
_lowerCAmelCase , _lowerCAmelCase = model(_snake_case ).to_tuple()
_lowerCAmelCase = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase = model(_snake_case ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase = model(_snake_case ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFTransfoXLForSequenceClassification(_snake_case )
_lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = config_and_inputs
_lowerCAmelCase = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__lowerCamelCase = () if is_tf_available() else ()
__lowerCamelCase = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFTransfoXLModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_snake_case , d_embed=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
self.model_tester.set_seed()
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
self.model_tester.set_seed()
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase = model.get_output_embeddings()
assert isinstance(_snake_case , tf.keras.layers.Layer )
_lowerCAmelCase = model.get_bias()
assert name is None
else:
_lowerCAmelCase = model.get_output_embeddings()
assert x is None
_lowerCAmelCase = model.get_bias()
assert name is None
def snake_case ( self ):
"""simple docstring"""
pass
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFTransfoXLModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def snake_case ( self ):
"""simple docstring"""
pass
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase = model.generate(_snake_case , max_length=200 , do_sample=_snake_case )
self.assertListEqual(output_ids[0].numpy().tolist() , _snake_case )
| 82 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase__ ( lowercase ):
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : Tuple = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
return Dataset.from_dict(lowerCamelCase__ )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self._create_example_records()
_UpperCamelCase : Any = Dataset.from_list(lowerCamelCase__ )
self.assertListEqual(dset.column_names ,['col_1', 'col_2'] )
for i, r in enumerate(lowerCamelCase__ ):
self.assertDictEqual(lowerCamelCase__ ,example_records[i] )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self._create_example_records()
_UpperCamelCase : Any = Dataset.from_list(lowerCamelCase__ )
_UpperCamelCase : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info ,dset_from_dict.info )
def UpperCamelCase_ ( self : List[Any] ): # checks what happens with missing columns
'''simple docstring'''
_UpperCamelCase : str = [{'col_1': 1}, {'col_2': 'x'}]
_UpperCamelCase : Tuple = Dataset.from_list(lowerCamelCase__ )
self.assertDictEqual(dset[0] ,{'col_1': 1} )
self.assertDictEqual(dset[1] ,{'col_1': None} ) # NB: first record is used for columns
def UpperCamelCase_ ( self : Optional[int] ): # checks if the type can be inferred from the second record
'''simple docstring'''
_UpperCamelCase : Any = [{'col_1': []}, {'col_1': [1, 2]}]
_UpperCamelCase : int = Dataset.from_list(lowerCamelCase__ )
self.assertEqual(dset.info.features['col_1'] ,Sequence(Value('int64' ) ) )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = Dataset.from_list([] )
self.assertEqual(len(lowerCamelCase__ ) ,0 )
self.assertListEqual(dset.column_names ,[] )
| 83 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =IFImgaImgSuperResolutionPipeline
a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
a__ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __lowerCAmelCase ( self ) -> List[str]:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Any = torch.manual_seed(A )
else:
_UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 263 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :jnp.ndarray
UpperCAmelCase_ :jnp.ndarray
class _SCREAMING_SNAKE_CASE ( nn.Module ):
UpperCAmelCase_ :int
UpperCAmelCase_ :Tuple[int] = (16, 32, 96, 256)
UpperCAmelCase_ :jnp.dtype = jnp.floataa
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[int] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase_ :int = []
for i in range(len(self.block_out_channels ) - 1 ):
lowerCAmelCase_ :Union[str, Any] = self.block_out_channels[i]
lowerCAmelCase_ :Optional[int] = self.block_out_channels[i + 1]
lowerCAmelCase_ :int = nn.Conv(
__A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__A )
lowerCAmelCase_ :List[str] = nn.Conv(
__A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__A )
lowerCAmelCase_ :Optional[int] = blocks
lowerCAmelCase_ :int = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __A ) -> Tuple:
lowerCAmelCase_ :Dict = self.conv_in(__A )
lowerCAmelCase_ :List[str] = nn.silu(__A )
for block in self.blocks:
lowerCAmelCase_ :Any = block(__A )
lowerCAmelCase_ :Optional[int] = nn.silu(__A )
lowerCAmelCase_ :List[Any] = self.conv_out(__A )
return embedding
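    # Note: the blocks built in setup include one stride-2 convolution per stage,
    # three in total for the default (16, 32, 96, 256) channels, so the
    # conditioning image is downsampled by 8x to the latent resolution before
    # being added to the sample in __call__.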
@flax_register_to_config
class _SCREAMING_SNAKE_CASE ( nn.Module , A__ , A__ ):
UpperCAmelCase_ :int = 32
UpperCAmelCase_ :int = 4
UpperCAmelCase_ :Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase_ :Union[bool, Tuple[bool]] = False
UpperCAmelCase_ :Tuple[int] = (320, 640, 1280, 1280)
UpperCAmelCase_ :int = 2
UpperCAmelCase_ :Union[int, Tuple[int]] = 8
UpperCAmelCase_ :Optional[Union[int, Tuple[int]]] = None
UpperCAmelCase_ :int = 1280
UpperCAmelCase_ :float = 0.0
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :jnp.dtype = jnp.floataa
UpperCAmelCase_ :bool = True
UpperCAmelCase_ :int = 0
UpperCAmelCase_ :str = "rgb"
UpperCAmelCase_ :Tuple[int] = (16, 32, 96, 256)
def __lowerCAmelCase ( self , __A ) -> FrozenDict:
# init input tensors
lowerCAmelCase_ :Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase_ :Dict = jnp.zeros(__A , dtype=jnp.floataa )
lowerCAmelCase_ :List[Any] = jnp.ones((1,) , dtype=jnp.intaa )
lowerCAmelCase_ :Optional[int] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCAmelCase_ :Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCAmelCase_ :Optional[int] = jnp.zeros(__A , dtype=jnp.floataa )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = jax.random.split(__A )
lowerCAmelCase_ :Optional[int] = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__A , __A , __A , __A , __A )["params"]
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Union[str, Any] = self.block_out_channels
lowerCAmelCase_ :int = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase_ :Dict = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase_ :int = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase_ :Optional[Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCAmelCase_ :Optional[Any] = FlaxTimestepEmbedding(__A , dtype=self.dtype )
lowerCAmelCase_ :int = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCAmelCase_ :List[str] = self.only_cross_attention
if isinstance(__A , __A ):
lowerCAmelCase_ :List[str] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__A , __A ):
lowerCAmelCase_ :Optional[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCAmelCase_ :Dict = []
lowerCAmelCase_ :Optional[Any] = []
lowerCAmelCase_ :Dict = block_out_channels[0]
lowerCAmelCase_ :List[Any] = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__A )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCAmelCase_ :List[Any] = output_channel
lowerCAmelCase_ :List[str] = block_out_channels[i]
lowerCAmelCase_ :Tuple = i == len(__A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase_ :Tuple = FlaxCrossAttnDownBlockaD(
in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCAmelCase_ :Optional[int] = FlaxDownBlockaD(
in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__A )
for _ in range(self.layers_per_block ):
lowerCAmelCase_ :List[str] = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__A )
if not is_final_block:
lowerCAmelCase_ :str = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__A )
lowerCAmelCase_ :List[Any] = down_blocks
lowerCAmelCase_ :Optional[Any] = controlnet_down_blocks
# mid
lowerCAmelCase_ :int = block_out_channels[-1]
lowerCAmelCase_ :List[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=__A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCAmelCase_ :Dict = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __A , __A , __A , __A , __A = 1.0 , __A = True , __A = False , ) -> Union[FlaxControlNetOutput, Tuple]:
lowerCAmelCase_ :Union[str, Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCAmelCase_ :Optional[int] = jnp.flip(__A , axis=1 )
# 1. time
if not isinstance(__A , jnp.ndarray ):
lowerCAmelCase_ :List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__A , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase_ :str = timesteps.astype(dtype=jnp.floataa )
lowerCAmelCase_ :Union[str, Any] = jnp.expand_dims(__A , 0 )
lowerCAmelCase_ :List[Any] = self.time_proj(__A )
lowerCAmelCase_ :Optional[Any] = self.time_embedding(__A )
# 2. pre-process
lowerCAmelCase_ :int = jnp.transpose(__A , (0, 2, 3, 1) )
lowerCAmelCase_ :List[Any] = self.conv_in(__A )
lowerCAmelCase_ :Union[str, Any] = jnp.transpose(__A , (0, 2, 3, 1) )
lowerCAmelCase_ :List[str] = self.controlnet_cond_embedding(__A )
sample += controlnet_cond
# 3. down
lowerCAmelCase_ :Any = (sample,)
for down_block in self.down_blocks:
if isinstance(__A , __A ):
lowerCAmelCase_ , lowerCAmelCase_ :Any = down_block(__A , __A , __A , deterministic=not train )
else:
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = down_block(__A , __A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCAmelCase_ :int = self.mid_block(__A , __A , __A , deterministic=not train )
# 5. contronet blocks
lowerCAmelCase_ :Dict = ()
for down_block_res_sample, controlnet_block in zip(__A , self.controlnet_down_blocks ):
lowerCAmelCase_ :Union[str, Any] = controlnet_block(__A )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase_ :Optional[Any] = controlnet_down_block_res_samples
lowerCAmelCase_ :List[Any] = self.controlnet_mid_block(__A )
# 6. scaling
lowerCAmelCase_ :List[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__A , mid_block_res_sample=__A )
| 84 |
"""simple docstring"""
def count_set_bits(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError('''Input must be a non-negative integer''')
    count = 0
    while number:
        # number & (number - 1) clears the lowest set bit, so each iteration
        # jumps straight to the next 1 instead of scanning all 32 positions
        number &= number - 1
        count += 1
    return count
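# Worked example: 13 is 0b1101, so the loop runs three times
# (0b1101 -> 0b1100 -> 0b1000 -> 0b0000) and count_set_bits(13) == 3.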
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 0 |
'''simple docstring'''
import json
import sys
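# Expected input shape, inferred from the loop below (names are illustrative):
# {"benchmarks/load.json": {"load_time": {"new": 1.2, "old": 1.5, "diff": -0.3}}}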
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f'### Benchmark: {benchmark_file_name}')
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f' {new_val:f}' if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f' / {old_val:f}' if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f' ({dif_val:f})' if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 85 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE, stripping the submodule prefix
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM, stripping the submodule prefix
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    # clip_sample=False is reconstructed by assumption; the garbled source lost the literal
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule='''scaled_linear''', beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )
    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
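# Example invocation (script and file names are placeholders):
#   python convert_ldm_original_checkpoint.py --checkpoint_path ldm.ckpt \
#       --config_path ldm_config.yaml --output_path ./converted_ldm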
| 263 | 0 |
"""simple docstring"""
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
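# Project Euler problem 25: the first Fibonacci term with 1000 digits is
# F(4782), so solution(1000) returns 4782.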
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 86 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :List[str] = logging.get_logger(__name__)
_lowerCAmelCase :Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''falcon'''
a__ =['''past_key_values''']
def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any:
_UpperCAmelCase : int = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A )
_UpperCAmelCase : int = hidden_size if n_embed is None else n_embed
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[int] = layer_norm_epsilon
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[int] = use_cache
_UpperCAmelCase : Any = hidden_dropout
_UpperCAmelCase : Dict = attention_dropout
_UpperCAmelCase : Any = bos_token_id
_UpperCAmelCase : List[Any] = eos_token_id
_UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase : Dict = alibi
_UpperCAmelCase : Optional[int] = new_decoder_architecture
_UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase : Optional[int] = parallel_attn
_UpperCAmelCase : Optional[int] = bias
super().__init__(bos_token_id=A , eos_token_id=A , **A )
@property
def __lowerCAmelCase ( self ) -> List[str]:
return self.hidden_size // self.num_attention_heads
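    # e.g. the falcon-7b defaults above (hidden_size=4544, num_attention_heads=71)
    # give a per-head dimension of 4544 // 71 = 64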
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return not self.alibi
| 263 | 0 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class snake_case_ ( __A ):
def __init__( self : List[Any] , *lowercase_ : List[Any] , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple=None , **lowercase_ : str ) -> Union[str, Any]:
super().__init__(*lowercase_ , **lowercase_ )
lowercase__ : Tuple = eval_examples
lowercase__ : Optional[int] = post_process_function
def __UpperCamelCase ( self : List[Any] , lowercase_ : Optional[Dataset] = None , lowercase_ : Any=None , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "eval" , **lowercase_ : Any , ) -> Dict[str, float]:
lowercase__ : List[Any] = gen_kwargs.copy()
lowercase__ : Optional[int] = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
lowercase__ : List[str] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
lowercase__ : Tuple = gen_kwargs
lowercase__ : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ : str = self.get_eval_dataloader(lowercase_ )
lowercase__ : Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ : List[str] = self.compute_metrics
lowercase__ : str = None
lowercase__ : Any = time.time()
lowercase__ : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ : Optional[int] = eval_loop(
lowercase_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
lowercase__ : int = compute_metrics
lowercase__ : Tuple = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ : Optional[int] = self.post_process_function(lowercase_ , lowercase_ , lowercase_ )
lowercase__ : str = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase__ : List[str] = metrics.pop(lowercase_ )
metrics.update(output.metrics )
else:
lowercase__ : Union[str, Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_ )
return metrics
def __UpperCamelCase ( self : str , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Optional[int]=None , lowercase_ : str = "test" , **lowercase_ : Any ) -> Tuple:
lowercase__ : Any = gen_kwargs.copy()
lowercase__ : Tuple = self.get_test_dataloader(lowercase_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ : List[Any] = self.compute_metrics
lowercase__ : Optional[Any] = None
lowercase__ : Any = time.time()
lowercase__ : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ : Dict = eval_loop(
lowercase_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
lowercase__ : Optional[int] = compute_metrics
lowercase__ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ : int = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , "predict" )
lowercase__ : List[Any] = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase__ : Tuple = metrics.pop(lowercase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_ )
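# Hypothetical usage sketch (every name below is an assumption, not from this file):
#   trainer = SeqaSeqTrainerSubclass(model=model, args=training_args,
#                                    eval_examples=eval_examples,
#                                    post_process_function=post_fn)
#   metrics = trainer.evaluate(max_length=64, num_beams=4)  # forwarded via gen_kwargs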
| 87 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    # the fine-tuned DialoGPT checkpoints store the LM head under the old decoder key
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 263 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits via the Chudnovsky series."""
    if not isinstance(precision, int):
        raise TypeError("""Undefined for non-integers""")
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term adds roughly 14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
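# e.g. pi(10) -> '3.14159265'; the last computed digit is dropped because the
# final Decimal division may round it.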
if __name__ == "__main__":
    n = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 88 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add an edge with normalised endpoint order and register its vertices."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Grow a minimum spanning tree outward from an arbitrary start vertex."""
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
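# Minimal usage sketch (toy graph, not the puzzle input below): the MST keeps
# edges (0, 1) and (1, 2), so its total weight is 1 + 2 = 3.
def _mst_smoke_test() -> None:
    toy = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    assert sum(toy.prims_algorithm().edges.values()) == 3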
def solution(filename: str = "p107_network.txt") -> int:
    """Weight saved by replacing the full network with its MST (Project Euler 107)."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split('\n')
    adjacency_matrix = [line.split(',') for line in data]
    for edge_b in range(1, len(adjacency_matrix)):
        for edge_a in range(edge_b):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 0 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap='Blues', normalize='true', )
    plt.title('Normalized Confusion Matrix - IRIS Dataset')
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 89 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''mgp-str'''
def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]:
super().__init__(**A )
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Dict = max_token_length
_UpperCAmelCase : Optional[Any] = num_character_labels
_UpperCAmelCase : int = num_bpe_labels
_UpperCAmelCase : List[str] = num_wordpiece_labels
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = mlp_ratio
_UpperCAmelCase : List[str] = distilled
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : str = drop_rate
_UpperCAmelCase : List[Any] = qkv_bias
_UpperCAmelCase : List[str] = attn_drop_rate
_UpperCAmelCase : Dict = drop_path_rate
_UpperCAmelCase : Union[str, Any] = output_aa_attentions
_UpperCAmelCase : List[str] = initializer_range
| 263 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as a nested Python list)"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
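# e.g. floats_list((2, 3)) -> a 2x3 nested list of floats drawn from [0, scale)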
@require_torch
@require_torchaudio
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=400 , lowerCamelCase__=2_000 , lowerCamelCase__=10 , lowerCamelCase__=160 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_000 , lowerCamelCase__=False , lowerCamelCase__=True , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = min_seq_length
__lowerCamelCase = max_seq_length
__lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCamelCase = padding_value
__lowerCamelCase = sampling_rate
__lowerCamelCase = return_attention_mask
__lowerCamelCase = do_normalize
__lowerCamelCase = feature_size
__lowerCamelCase = chunk_length
__lowerCamelCase = hop_length
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self , lowerCamelCase__=False , lowerCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
__lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = WhisperFeatureExtractor if is_speech_available() else None
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = WhisperFeatureExtractionTester(self )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
__lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
__lowerCamelCase = feat_extract_first.to_dict()
__lowerCamelCase = feat_extract_second.to_dict()
__lowerCamelCase = feat_extract_first.mel_filters
__lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = os.path.join(lowerCamelCase__ , 'feat_extract.json' )
feat_extract_first.to_json_file(lowerCamelCase__ )
__lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
__lowerCamelCase = feat_extract_first.to_dict()
__lowerCamelCase = feat_extract_second.to_dict()
__lowerCamelCase = feat_extract_first.mel_filters
__lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
__lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCamelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowerCamelCase = np.asarray(lowerCamelCase__ )
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
__lowerCamelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
__lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
__lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
import torch
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCamelCase = np.random.rand(100 , 32 ).astype(np.floataa )
__lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCamelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowerCamelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__lowerCamelCase = ds.sort('id' ).select(range(lowerCamelCase__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
# fmt: off
__lowerCamelCase = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
__lowerCamelCase = self._load_datasamples(1 )
__lowerCamelCase = WhisperFeatureExtractor()
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCamelCase__ , atol=1e-4 ) )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCamelCase = self._load_datasamples(1 )[0]
__lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
__lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
| 90 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''')
    if len(scores) == 0:
        raise ValueError('''Scores cannot be empty''')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
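# Worked example for the tree used in main() below: leaves
# [90, 23, 6, 33, 21, 65, 123, 34423] reduce as
# max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423)))
# = max(min(90, 33), min(65, 34423)) = max(33, 65) = 65.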
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('''Optimal value : ''', end='''''')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save_directory(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
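# Hedged usage sketch (assumes network access to the Hub and `timm` installed)
# showing the two loading paths the tests above compare: the same `AutoBackbone`
# API fronts both a raw timm checkpoint and a native transformers one.
from transformers import AutoBackbone

timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18")
# Default out_indices differ: (-1,) for timm vs [len(stage_names) - 1] for transformers.
print(timm_backbone.out_indices, hf_backbone.out_indices)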
| 91 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 263 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
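# Note: this module intentionally re-exports everything from `.utils` so that
# historical imports such as `from transformers.file_utils import WEIGHTS_NAME`
# keep working after the internal reorganization. A hedged sketch of the same
# backward-compatibility pattern for any refactored package (names illustrative):
#
#   # old_location.py
#   from .new_location import public_name_a, public_name_b  # noqa: F401
#
# Callers importing from the old module path are silently served the moved objects.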
| 92 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 263 | 0 |
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = "\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class FlaxLogitsProcessor:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
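# Hedged usage sketch (not from the original file): composing warpers with the
# list class above; the names assume the classes defined later in this module.
#
#   processors = FlaxLogitsProcessorList()
#   processors.append(FlaxTemperatureLogitsWarper(0.7))
#   processors.append(FlaxTopKLogitsWarper(top_k=50))
#   scores = processors(input_ids, scores, cur_len=input_ids.shape[-1])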
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
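# Worked example (illustrative, small numbers): for one row whose softmax probs,
# sorted descending, are [0.5, 0.3, 0.1, 0.1] and top_p=0.7, the cumulative sums
# are [0.5, 0.8, 0.9, 1.0], so `cumulative_probs < top_p` keeps only index 0;
# the jnp.roll plus `.at[:, 0].set(True)` step shifts the mask right so index 1,
# the token that crosses the 0.7 boundary, is kept too, giving a 2-token nucleus.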
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
| 93 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
'''simple docstring'''
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
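# Hedged usage sketch (standalone, not part of the test file): the canonical
# TextIteratorStreamer pattern, where generation runs in a background thread
# while the main thread consumes decoded text chunks as they become available.
# `model`, `tokenizer`, and `input_ids` are assumed to exist as in the tests above.
#
#   from threading import Thread
#   from transformers import TextIteratorStreamer
#
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=60.0)
#   Thread(target=model.generate, kwargs={"input_ids": input_ids, "max_new_tokens": 64, "streamer": streamer}).start()
#   for chunk in streamer:
#       print(chunk, end="", flush=True)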
| 263 | 0 |
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to locate each insertion point.

    >>> binary_insertion_sort([5, 2, 4, 6, 1, 3])
    [1, 2, 3, 4, 5, 6]
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
snake_case : Union[str, Any] = input('''Enter numbers separated by a comma:\n''').strip()
snake_case : Optional[int] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
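# Worked trace (illustrative): sorting [5, 2, 4]. At i=1 the binary search over
# [5] ends with low=0, so 5 shifts right and 2 is written, giving [2, 5]; at i=2
# the search over [2, 5] for 4 ends with low=1, so 5 shifts right and 4 is
# written, giving [2, 4, 5]. The binary search only reduces comparisons; the
# element shifts keep the worst case at O(n^2).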
| 94 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Euler's gamma function of a positive real via numerical integration; gamma(5) == 24.0."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 263 | 0 |
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge^2."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge^3."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
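# Design note: for plain integers Python's built-in bitwise OR gives the same
# result directly via `bin(a | b)`, so the function above is mainly didactic,
# spelling out the digit-by-digit rule. Quick cross-check (illustrative):
#
#   assert binary_or(25, 32) == bin(25 | 32)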
| 263 | 0 |
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 96 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mobilebert_fast'''] = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilebert'''] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilebert'''] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 97 |
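# Hedged sketch of the lazy-import pattern used by the two __init__ files above
# (names illustrative): _LazyModule defers the heavy submodule imports until an
# attribute is first accessed, while the TYPE_CHECKING branch keeps static type
# checkers and IDEs fully informed about the public API.
#
#   import sys
#   from transformers.utils import _LazyModule
#
#   _import_structure = {"my_submodule": ["MyPublicClass"]}
#   sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)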
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
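# Illustration of the fairseq offset (hedged: concrete ids depend on the actual
# sentencepiece model file, whose path below is illustrative). fairseq_offset=1
# shifts every spm id up by one so fairseq's layout (<s>=0, <pad>=1, </s>=2,
# <unk>=3) is preserved, and <mask> is appended after the shifted spm vocab:
#
#   tok = XLMRobertaTokenizer("sentencepiece.bpe.model")
#   tok.convert_tokens_to_ids("<s>")     # -> 0 (served from the fairseq table, not spm)
#   tok.convert_tokens_to_ids("<mask>")  # -> len(tok.sp_model) + 1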
| 263 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    k = str(n)
    return len(k) == 9 and set(k) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
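# Why those multipliers (explanatory note): for a 4-digit n whose double has 5
# digits, the concatenated product of n with (1, 2) is n*10^5 + 2n = 100002*n;
# for a 3-digit n whose double and triple stay 3 digits, the concatenated
# product with (1, 2, 3) is n*10^6 + 2n*10^3 + 3n = 1002003*n. Searching
# downward returns the largest pandigital concatenation, e.g. 9327 -> 932718654.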
| 98 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 263 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data ) -> tuple:
    # Split the fetched dataset bunch into features and regression targets
    return (data["data"], data["target"])


def xgboost(features , target , test_features ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions


def main() -> None:
    # Load California house price dataset
    housing = fetch_california_housing()
    data , target = data_handling(housing )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(F'Mean Absolute Error : {mean_absolute_error(y_test , predictions )}' )
    print(F'Mean Square Error : {mean_squared_error(y_test , predictions )}' )
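    # Note (illustrative): scikit-learn can report the root mean squared error
    # directly via mean_squared_error(y_test, predictions, squared=False),
    # which is often easier to interpret than the raw MSE printed above.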
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 99 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
# Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
# Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )

    tokenizer = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken('''<ent>''' , lstrip=False , rstrip=False )
    entity_token_two = AddedToken('''<ent2>''' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_one, entity_token_two]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )

    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
        json.dump(entity_vocab , f )

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
# Initialize the embeddings of the special tokens
    word_emb = state_dict['''embeddings.word_embeddings.weight''']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
    state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + '''w2e_''' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + '''e2w_''' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + '''e2e_''' + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
    entity_emb[entity_vocab['''[MASK2]''']] = entity_emb[entity_vocab['''[MASK]''']]

    model = LukeModel(config=config ).eval()

    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='''entity_classification''' )
    text = (
        '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
        ''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='''pt''' )
    outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab(entity_vocab_path ):
    entity_vocab = {}
    with open(entity_vocab_path , '''r''' , encoding='''utf-8''' ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split('''\t''' )
            entity_vocab[title] = index
    return entity_vocab
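
# Illustrative entity_vocab.tsv layout (tab-separated; the second column, e.g.
# a corpus frequency, is ignored above, and the line number becomes the id):
#   [PAD]<TAB>0
#   [UNK]<TAB>0
#   [MASK]<TAB>2093
#   Ana Ivanovic<TAB>631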
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowerCAmelCase :Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 263 | 0 |
"""simple docstring"""
__magic_name__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
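
# Minimal sketch of the dummy-object pattern behind the fallbacks above (the
# class name and message are illustrative, not the real generated dummies):
# importing such a placeholder always succeeds, but instantiating it fails
# with an actionable error telling the user which extra dependency to install.
class _SketchDummyOnnxModel:
    def __init__(self , *args , **kwargs ):
        raise ImportError('''This object requires the optional `onnx` dependency; install it to use it.''' )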
| 100 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs , ks ):
    """Return True if regexes in qs match any window of strings in ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + '''$''' ) for x in qs) )
    for i in range(len(ks ) - len(qts ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules(rules ):
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''' , None )),
        (("transformer", "wte", "embedding"), P('''mp''' , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , '''mp''' )),
        (("attention", "out_proj", "kernel"), P('''mp''' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , '''mp''' )),
        (("mlp", "c_fc", "bias"), P('''mp''' )),
        (("mlp", "c_proj", "kernel"), P('''mp''' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
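
# Illustrative usage (assumes a Flax model with GPT-J-style parameter names):
#   param_specs = set_partitions(model.params)
# Each leaf becomes either None (fully replicated) or a PartitionSpec that
# shards one weight axis along the "mp" (model-parallel) mesh dimension.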
| 263 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol ):
    def process(self , sample: float ) -> float:
        """Calculate y[n] for input sample x[n]."""
        return 0.0
def get_bounds(fft_results: np.ndarray , samplerate: int ) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response(filter_type: FilterType , samplerate: int ) -> None:
    """Show the frequency response of a filter via an FFT of its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )

    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('''Gain (dB)''' )

    plt.plot(fft_db )
    plt.show()
def show_phase_response(filter_type: FilterType , samplerate: int ) -> None:
    """Show the phase response of a filter via an FFT of its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs ) )

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )

    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('''Phase shift (Radians)''' )
    plt.plot(np.unwrap(phases , -2 * pi ) )
    plt.show()
plt.show()
| 101 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
        dataset = load_dataset('''ashraq/esc50''' )
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vacuum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
        # This is an audio of a dog
        dataset = load_dataset('''ashraq/esc50''' )
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                {'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
                ],
            ]
            * 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
| 263 | 0 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
SCREAMING_SNAKE_CASE : Optional[int] = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4     Word itself            This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6     Parse bit              This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11    Named Entities         These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
SCREAMING_SNAKE_CASE : Optional[int] = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )

    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )

    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )

    if not keep_singletons:
        logger.info(
            f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            '''files, respectively''' )

    return doc_coref_infos
def evaluate(key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": f1} )

        logger.info(
            name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {f1 * 100:.2f}""" , )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"""CoNLL score: {conll:.2f}""" )
        output_scores.update({'''conll_score''': conll} )

    return output_scores
def check_gold_parse_annotation(key_lines ):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def _compute(self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )

        return score
| 102 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def dummy_dataloaders(a=2 , b=3 , batch_size=16 , n_train_batches: int = 10 , n_valid_batches: int = 2 ):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )

    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train(num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    """Trains for `num_epochs` and returns the random values drawn per epoch."""
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> List[Any]:
super().__init__()
_UpperCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) )
_UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) )
def __lowerCAmelCase ( self , A ) -> Tuple:
return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
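    # Minimal checkpointing sketch (illustrative; condenses the pattern exercised above):
    #   config = ProjectConfiguration(project_dir=savedir, automatic_checkpoint_naming=True)
    #   accelerator = Accelerator(project_config=config)
    #   model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
    #   accelerator.save_state()                      # -> <savedir>/checkpoints/checkpoint_0
    #   accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"))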
| 263 | 0 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 103 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :str = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    def test_all_is_compatible(self ):
        filenames = [
            '''safety_checker/pytorch_model.bin''',
            '''safety_checker/model.safetensors''',
            '''vae/diffusion_pytorch_model.bin''',
            '''vae/diffusion_pytorch_model.safetensors''',
            '''text_encoder/pytorch_model.bin''',
            '''text_encoder/model.safetensors''',
            '''unet/diffusion_pytorch_model.bin''',
            '''unet/diffusion_pytorch_model.safetensors''',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_diffusers_model_is_compatible(self ):
        filenames = [
            '''unet/diffusion_pytorch_model.bin''',
            '''unet/diffusion_pytorch_model.safetensors''',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_diffusers_model_is_not_compatible(self ):
        filenames = [
            '''safety_checker/pytorch_model.bin''',
            '''safety_checker/model.safetensors''',
            '''vae/diffusion_pytorch_model.bin''',
            '''vae/diffusion_pytorch_model.safetensors''',
            '''text_encoder/pytorch_model.bin''',
            '''text_encoder/model.safetensors''',
            '''unet/diffusion_pytorch_model.bin''',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )

    def test_transformer_model_is_compatible(self ):
        filenames = [
            '''text_encoder/pytorch_model.bin''',
            '''text_encoder/model.safetensors''',
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_transformer_model_is_not_compatible(self ):
        filenames = [
            '''safety_checker/pytorch_model.bin''',
            '''safety_checker/model.safetensors''',
            '''vae/diffusion_pytorch_model.bin''',
            '''vae/diffusion_pytorch_model.safetensors''',
            '''text_encoder/pytorch_model.bin''',
            # Removed: 'text_encoder/model.safetensors',
            '''unet/diffusion_pytorch_model.bin''',
            '''unet/diffusion_pytorch_model.safetensors''',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )

    def test_all_is_compatible_variant(self ):
        filenames = [
            '''safety_checker/pytorch_model.fp16.bin''',
            '''safety_checker/model.fp16.safetensors''',
            '''vae/diffusion_pytorch_model.fp16.bin''',
            '''vae/diffusion_pytorch_model.fp16.safetensors''',
            '''text_encoder/pytorch_model.fp16.bin''',
            '''text_encoder/model.fp16.safetensors''',
            '''unet/diffusion_pytorch_model.fp16.bin''',
            '''unet/diffusion_pytorch_model.fp16.safetensors''',
        ]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_compatible_variant(self ):
        filenames = [
            '''unet/diffusion_pytorch_model.fp16.bin''',
            '''unet/diffusion_pytorch_model.fp16.safetensors''',
        ]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_compatible_variant_partial(self ):
        # pass variant but use the non-variant filenames
        filenames = [
            '''unet/diffusion_pytorch_model.bin''',
            '''unet/diffusion_pytorch_model.safetensors''',
        ]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_not_compatible_variant(self ):
        filenames = [
            '''safety_checker/pytorch_model.fp16.bin''',
            '''safety_checker/model.fp16.safetensors''',
            '''vae/diffusion_pytorch_model.fp16.bin''',
            '''vae/diffusion_pytorch_model.fp16.safetensors''',
            '''text_encoder/pytorch_model.fp16.bin''',
            '''text_encoder/model.fp16.safetensors''',
            '''unet/diffusion_pytorch_model.fp16.bin''',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = '''fp16'''
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_compatible_variant(self ):
        filenames = [
            '''text_encoder/pytorch_model.fp16.bin''',
            '''text_encoder/model.fp16.safetensors''',
        ]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_compatible_variant_partial(self ):
        # pass variant but use the non-variant filenames
        filenames = [
            '''text_encoder/pytorch_model.bin''',
            '''text_encoder/model.safetensors''',
        ]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_not_compatible_variant(self ):
        filenames = [
            '''safety_checker/pytorch_model.fp16.bin''',
            '''safety_checker/model.fp16.safetensors''',
            '''vae/diffusion_pytorch_model.fp16.bin''',
            '''vae/diffusion_pytorch_model.fp16.safetensors''',
            '''text_encoder/pytorch_model.fp16.bin''',
            # 'text_encoder/model.fp16.safetensors',
            '''unet/diffusion_pytorch_model.fp16.bin''',
            '''unet/diffusion_pytorch_model.fp16.safetensors''',
        ]
        variant = '''fp16'''
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
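
# Illustrative sketch (not the actual diffusers implementation) of the rule the
# tests above encode: every torch ".bin" weight needs a ".safetensors"
# counterpart, tolerating an optional ".<variant>" infix and the
# "pytorch_model" vs "model" naming split between the two formats.
def _sketch_is_safetensors_compatible(filenames , variant=None ):
    def stem(name ):
        name = name.rsplit('''.''' , 1 )[0]  # drop .bin / .safetensors
        if variant and name.endswith(f'''.{variant}''' ):
            name = name[: -len(variant ) - 1]  # drop the variant infix
        # transformers components name torch weights "pytorch_model" but
        # safetensors weights "model"; normalize so the two can match
        return name.replace('''pytorch_model''' , '''model''' )

    safetensors_stems = {stem(f ) for f in filenames if f.endswith('''.safetensors''' )}
    return all(stem(f ) in safetensors_stems for f in filenames if f.endswith('''.bin''' ) )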
| 104 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def _SCREAMING_SNAKE_CASE ( _lowercase : str = "" ) ->dict[str, float]:
'''simple docstring'''
a : List[str] = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
a : Dict = BeautifulSoup(requests.get(_lowercase ).text , "html.parser" )
a : Union[str, Any] = soup.find_all("td" , attrs="titleColumn" )
a : int = soup.find_all("td" , class_="ratingColumn imdbRating" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_lowercase , _lowercase )
}
def _SCREAMING_SNAKE_CASE ( _lowercase : str = "IMDb_Top_250_Movies.csv" ) ->None:
'''simple docstring'''
a : Optional[int] = get_imdb_top_aaa_movies()
with open(_lowercase , "w" , newline="" ) as out_file:
a : int = csv.writer(_lowercase )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
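# Quick sanity check of the CSV produced above (assumes the scrape succeeded
# and that IMDb still serves the markup the selectors expect):
#
#     import csv
#     with open("IMDb_Top_250_Movies.csv", newline="") as f:
#         rows = list(csv.reader(f))
#     assert rows[0] == ["Movie title", "IMDb rating"]
#     assert len(rows) == 251  # header row + 250 movies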
| 105 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''})
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(),
        reason='''XFormers attention is only available with CUDA and `xformers` installed''',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1E-2)
| 263 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batch of differentiable pinhole cameras."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode='''trunc'''),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # the dataclass field has no default, so it must be forwarded
        )


def create_pan_cameras(size):
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
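

# Usage sketch for the helpers above: create_pan_cameras builds 20 poses with
# shape=(1, 20), so camera_rays stacks per-pixel (origin, direction) pairs
# into a [1, 20 * size * size, 2, 3] tensor.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    print(cameras.camera_rays.shape)  # torch.Size([1, 81920, 2, 3])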
| 106 |
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """Count the set bits in a non-negative integer with Brian Kernighan's trick."""
    if not isinstance(number, int) or number < 0:
        raise ValueError('''Input must be a non-negative integer''')
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
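

# Worked example of why the loop runs once per set bit: 25 is 0b11001, and
# number &= number - 1 clears the lowest set bit each pass (25 -> 24 -> 16 -> 0).
assert get_set_bits_count(25) == 3
assert get_set_bits_count(0) == 0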
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=1_28,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22_050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
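

# Hypothetical usage sketch (the relative imports above only resolve inside
# the transformers package; shapes follow from the patching logic above):
#
#     import numpy as np
#     extractor = TvltFeatureExtractor(sampling_rate=44_100)
#     audio = np.zeros(44_100, dtype=np.float32)  # one second of silence
#     batch = extractor(audio, sampling_rate=44_100, return_tensors="np")
#     # batch["audio_values"]: (1, 1, max_time_len, 128) log-mel patches
#     # batch["audio_mask"]:   (1, max_patch_len) patch-level attention mask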
| 107 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE (dropping the "first_stage_model." prefix)
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM (dropping the "model.diffusion_model." prefix)
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule='''scaled_linear''', beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )
    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
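# Example invocation (script name and all paths below are placeholders for
# your own files):
#
#     python conversion_ldm_uncond.py \
#         --checkpoint_path ldm.ckpt \
#         --config_path ldm_config.yaml \
#         --output_path ./ldm-pipeline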
| 263 | 0 |
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of ``function`` inside [a, b] by repeated interval halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """Example function: x**3 - 2*x - 5 has a real root near 2.0945515."""
    return x**3 - 2 * x - 5
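

# Worked check: f(2) = -1 and f(3) = 16 bracket a sign change, and the loop
# narrows the interval around the real root of x**3 - 2*x - 5 until the
# half-width drops below the 10**-7 tolerance.
assert abs(bisection(f, 1, 1_000) - 2.0945515) < 1e-4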
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 108 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
    'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}


class FalconConfig(PretrainedConfig):
    model_type = '''falcon'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(self, vocab_size=65_024, hidden_size=4_544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1E-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
| 263 | 0 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" )
    return bool(re.search(pattern, phone))
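

# A few illustrative checks of the pattern above: a 0/94/+94/0094 prefix,
# a 7x mobile code, an optional "-" or " " separator, then seven digits.
assert is_sri_lankan_phone_number("+94773283048")
assert is_sri_lankan_phone_number("072-1234567")
assert not is_sri_lankan_phone_number("0912343221")  # 91 is not a mobile code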
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 109 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)  # rename the LM head weight to the transformers layout
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 263 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__snake_case = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__snake_case = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__snake_case = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
__snake_case = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
__snake_case = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
__snake_case = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__snake_case = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__snake_case = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : str =VOCAB_FILES_NAMES
UpperCamelCase_ : Union[str, Any] =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[Any] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : Dict =DPRContextEncoderTokenizer
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : int =VOCAB_FILES_NAMES
UpperCamelCase_ : Any =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : List[str] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : List[str] =DPRQuestionEncoderTokenizer
__snake_case = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
__snake_case = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
__snake_case = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(lowercase )
class UpperCAmelCase_ :
"""simple docstring"""
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
elif titles is None or texts is None:
UpperCamelCase :int = titles if texts is None else texts
return super().__call__(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :Optional[Any] = titles if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [titles]
UpperCamelCase :int = texts if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [texts]
UpperCamelCase :List[str] = len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = questions if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [questions] * n_passages
assert len(SCREAMING_SNAKE_CASE_ ) == len(
SCREAMING_SNAKE_CASE_ ), F'''There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE_ )} titles and {len(SCREAMING_SNAKE_CASE_ )} texts.'''
UpperCamelCase :Optional[Any] = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids''']
UpperCamelCase :Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids''']
UpperCamelCase :Any = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
}
if return_attention_mask is not False:
UpperCamelCase :Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCamelCase :str = attention_mask
return self.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 64 , SCREAMING_SNAKE_CASE_ = 4 , ) -> List[DPRSpanPrediction]:
UpperCamelCase :List[Any] = reader_input['''input_ids''']
UpperCamelCase :int = reader_output[:3]
UpperCamelCase :Dict = len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = sorted(range(SCREAMING_SNAKE_CASE_ ) , reverse=SCREAMING_SNAKE_CASE_ , key=relevance_logits.__getitem__ )
UpperCamelCase :List[DPRReaderOutput] = []
for doc_id in sorted_docs:
UpperCamelCase :str = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCamelCase :Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCamelCase :int = sequence_ids.index(self.pad_token_id )
else:
UpperCamelCase :Any = len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE_ , top_spans=SCREAMING_SNAKE_CASE_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE_ , start_index=SCREAMING_SNAKE_CASE_ , end_index=SCREAMING_SNAKE_CASE_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(SCREAMING_SNAKE_CASE_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> List[DPRSpanPrediction]:
UpperCamelCase :List[str] = []
for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCamelCase :int = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] , reverse=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
UpperCamelCase :Any = end_index - start_index + 1
assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(SCREAMING_SNAKE_CASE_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowercase )
class UpperCAmelCase_ ( lowercase, lowercase ):
"""simple docstring"""
UpperCamelCase_ : Tuple =VOCAB_FILES_NAMES
UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[str] =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Optional[int] =READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : List[Any] =['input_ids', 'attention_mask']
UpperCamelCase_ : List[Any] =DPRReaderTokenizer
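# Illustrative call pattern for the reader tokenizer defined above (in the
# released transformers API this class is DPRReaderTokenizerFast; the
# checkpoint name and strings below are placeholders):
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions=["What does DPR stand for?"],
#         titles=["Dense Passage Retrieval"],
#         texts=["DPR is a set of tools for open-domain question answering."],
#         return_tensors="pt",
#     )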
| 259 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Find the maximum saving from replacing the network with its MST."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]

    with open(filepath) as f:
        data = f.read().strip().split('''\n''')
    adjaceny_matrix = [line.split(''',''') for line in data]
    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
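

# Toy check of the Prim's implementation above: in a triangle with edge
# weights 1, 2 and 7 the minimum spanning tree keeps 1 + 2, so the total
# drops from 10 to 3.
_toy = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 7})
assert sum(_toy.prims_algorithm().edges.values()) == 3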
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__( TokenizerTesterMixin , unittest.TestCase ):
lowercase__ = GPTaTokenizer
lowercase__ = GPTaTokenizerFast
lowercase__ = True
lowercase__ = {"""add_prefix_space""": True}
lowercase__ = False
def lowercase_ ( self : Optional[Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
a : List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
a : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a : Dict = {'''unk_token''': '''<unk>'''}
a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def lowercase_ ( self : Any , **__snake_case : Tuple ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase_ ( self : Union[str, Any] , **__snake_case : Optional[int] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase_ ( self : Tuple , __snake_case : Optional[Any] ):
a : List[Any] = '''lower newer'''
a : List[str] = '''lower newer'''
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
a : Union[str, Any] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a : Optional[Any] = '''lower newer'''
a : str = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a : List[str] = tokenizer.tokenize(__snake_case , add_prefix_space=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
a : Tuple = tokens + [tokenizer.unk_token]
a : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def lowercase_ ( self : Union[str, Any] ):
if not self.test_rust_tokenizer:
return
a : Union[str, Any] = self.get_tokenizer()
a : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=__snake_case )
a : Any = '''lower newer'''
# Testing tokenization
a : int = tokenizer.tokenize(__snake_case , add_prefix_space=__snake_case )
a : List[str] = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing conversion to ids without special tokens
a : Optional[int] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
a : List[str] = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing conversion to ids with special tokens
a : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__snake_case )
a : List[str] = tokenizer.encode(__snake_case , add_prefix_space=__snake_case )
a : Optional[int] = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing the unknown token
a : List[str] = tokens + [rust_tokenizer.unk_token]
a : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def lowercase_ ( self : Tuple , *__snake_case : int , **__snake_case : int ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowercase_ ( self : Tuple , __snake_case : int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a : Tuple = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
# Simple input
a : Optional[int] = '''This is a simple input'''
a : Optional[Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
a : Dict = ('''This is a simple input''', '''This is a pair''')
a : Optional[int] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding='max_length' )
# Simple input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding='max_length' )
# Simple input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding='max_length' , )
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding='max_length' )
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding='max_length' )
# Pair input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding='max_length' , )
def lowercase_ ( self : str ):
a : str = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
a : Optional[int] = '''This is a simple input'''
a : List[str] = ['''This is a simple input looooooooong''', '''This is a simple input''']
a : List[Any] = ('''This is a simple input''', '''This is a pair''')
a : List[str] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
a : int = tokenizer.pad_token_id
a : str = tokenizer(__snake_case , padding='max_length' , max_length=30 , return_tensors='np' )
a : List[str] = tokenizer(__snake_case , padding=__snake_case , truncate=__snake_case , return_tensors='np' )
a : List[str] = tokenizer(*__snake_case , padding='max_length' , max_length=60 , return_tensors='np' )
a : Any = tokenizer(__snake_case , padding=__snake_case , truncate=__snake_case , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def lowercase_ ( self : Any ):
a : Tuple = '''$$$'''
a : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__snake_case , add_bos_token=__snake_case )
a : Optional[Any] = '''This is a simple input'''
a : Tuple = ['''This is a simple input 1''', '''This is a simple input 2''']
a : str = tokenizer.bos_token_id
a : List[Any] = tokenizer(__snake_case )
a : Union[str, Any] = tokenizer(__snake_case )
self.assertEqual(out_s.input_ids[0] , __snake_case )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
a : Dict = tokenizer.decode(out_s.input_ids )
a : Tuple = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __snake_case )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowercase_ ( self : List[str] ):
pass
def lowercase_ ( self : Any ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
a : Any = [self.get_tokenizer(do_lower_case=__snake_case , add_bos_token=__snake_case )]
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
a : List[str] = '''Encode this.'''
a : List[Any] = '''This one too please.'''
a : str = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
encoded_sequence += tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
a : List[Any] = tokenizer.encode_plus(
__snake_case , __snake_case , add_special_tokens=__snake_case , return_special_tokens_mask=__snake_case , )
a : Optional[int] = encoded_sequence_dict['''input_ids''']
a : str = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
a : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__snake_case )
]
a : Optional[int] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__snake_case , __snake_case )
@require_tokenizers
class a__( unittest.TestCase ):
def lowercase_ ( self : str ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
a : Dict = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=__snake_case )
a : Tuple = '''A photo of a cat'''
a : str = tokenizer.encode(
__snake_case , )
self.assertEqual(__snake_case , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('test_opt' )
a : Union[str, Any] = AutoTokenizer.from_pretrained('./test_opt' )
a : Dict = tokenizer.encode(
__snake_case , )
self.assertEqual(__snake_case , [2, 2_50, 13_45, 9, 10, 47_58] )
def lowercase_ ( self : str ):
a : Dict = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=__snake_case )
a : Optional[Any] = '''A photo of a cat'''
a : Union[str, Any] = tokenizer.encode(
__snake_case , )
# Same as above
self.assertEqual(__snake_case , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def lowercase_ ( self : int ):
a : int = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=__snake_case )
a : Optional[Any] = '''bos'''
a : Optional[int] = tokenizer.get_vocab()['''bos''']
a : Tuple = '''A photo of a cat'''
a : Optional[Any] = tokenizer.encode(
__snake_case , )
# We changed the bos token
self.assertEqual(__snake_case , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('./tok' )
a : List[str] = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
a : List[str] = tokenizer.encode(
__snake_case , )
self.assertEqual(__snake_case , [3_19_57, 2_50, 13_45, 9, 10, 47_58] ) | 297 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}


class MgpstrConfig(PretrainedConfig):
    model_type = '''mgp-str'''

    def __init__(self, image_size=[32, 1_28], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50_257, num_wordpiece_labels=30_522, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1E-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 263 | 0 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Check whether ``next_ver`` can extend the current partial path."""
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursive backtracking helper that tries to complete the cycle."""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle starting at ``start_index``, or [] if none exists."""
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
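

# Quick demonstration of the backtracking search on a 5-vertex graph
# (an arbitrary symmetric 0/1 adjacency matrix chosen for illustration):
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(demo_graph))  # [0, 1, 2, 4, 3, 0]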
| 91 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''')
    if len(scores) == 0:
        raise ValueError('''Scores cannot be empty''')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print('''Optimal value : ''', end='''''')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
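# For the scores in main() the tree height is log2(8) = 3: the leaf pairs
# reduce under max to (90, 33, 65, 34423), the next level under min gives
# (33, 65), and the root max picks 65, so the program prints
# "Optimal value : 65".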
| 263 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase :Any = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Tuple = ['DeiTFeatureExtractor']
lowerCAmelCase :Union[str, Any] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Dict = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Any = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowerCAmelCase :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
        _UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            _UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = generator.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> List[str]:
        _UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = '''cyberpunk 2077'''
_UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger '''
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.text_to_image(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
_UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 263 | 0 |
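For orientation, the dual-guided call exercised above reduces to the sketch below. The checkpoint name, image URL, and call signature come from the test itself; the device placement is an illustrative assumption, and running it needs a GPU plus network access.

import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image

pipe = VersatileDiffusionPipeline.from_pretrained(
    'shi-labs/versatile-diffusion', torch_dtype=torch.float16).to('cuda')
init_image = load_image(
    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
# One call conditions on both the text prompt and the reference image.
images = pipe.dual_guided(
    prompt='first prompt', image=init_image, text_to_image_strength=0.75,
    generator=torch.manual_seed(0), num_inference_steps=2, output_type='numpy').images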
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'))
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type='numpy').images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type='numpy', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 156 |
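A minimal sketch of unconditional PNDM sampling, mirroring the integration test above (the checkpoint is the one the test uses; network access is assumed):

import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained('google/ddpm-cifar10-32')
pndm = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
images = pndm(generator=torch.manual_seed(0), output_type='numpy').images
print(images.shape)  # (1, 32, 32, 3), per the assertion above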
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False,
            set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu',
            projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16)
        pipe.to('cuda')

        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator,
            num_inference_steps=5, max_iter_to_alter=5, output_type='numpy').images[0]

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
        assert np.abs((expected_image - image).max()) < 5e-1
| 263 | 0 |
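The integration test boils down to the hedged sketch below. Everything except the device string is taken from the test; a CUDA device and Hub access are assumed. token_indices points at the prompt tokens ("elephant" and "glasses" here) whose cross-attention maps the pipeline strengthens during denoising.

import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16).to('cuda')
image = pipe(
    prompt='a painting of an elephant with glasses',
    token_indices=[5, 7],            # tokens to attend-and-excite
    guidance_scale=7.5,
    generator=torch.manual_seed(51),
    num_inference_steps=5,
    max_iter_to_alter=5,
).images[0]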
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNet2DModel,
)
TEST_UNET_CONFIG = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
IMAGENET_64_UNET_CONFIG = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
LSUN_256_UNET_CONFIG = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
CD_SCHEDULER_CONFIG = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'num_train_timesteps': 151,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected')
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Map the original in/out layer stacks onto diffusers' ResnetBlock2D attribute names.
    new_checkpoint[f'{new_prefix}.norm1.weight'] = checkpoint[f'{old_prefix}.in_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm1.bias'] = checkpoint[f'{old_prefix}.in_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv1.weight'] = checkpoint[f'{old_prefix}.in_layers.2.weight']
    new_checkpoint[f'{new_prefix}.conv1.bias'] = checkpoint[f'{old_prefix}.in_layers.2.bias']
    new_checkpoint[f'{new_prefix}.time_emb_proj.weight'] = checkpoint[f'{old_prefix}.emb_layers.1.weight']
    new_checkpoint[f'{new_prefix}.time_emb_proj.bias'] = checkpoint[f'{old_prefix}.emb_layers.1.bias']
    new_checkpoint[f'{new_prefix}.norm2.weight'] = checkpoint[f'{old_prefix}.out_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm2.bias'] = checkpoint[f'{old_prefix}.out_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv2.weight'] = checkpoint[f'{old_prefix}.out_layers.3.weight']
    new_checkpoint[f'{new_prefix}.conv2.bias'] = checkpoint[f'{old_prefix}.out_layers.3.bias']
    if has_skip:
        new_checkpoint[f'{new_prefix}.conv_shortcut.weight'] = checkpoint[f'{old_prefix}.skip_connection.weight']
        new_checkpoint[f'{new_prefix}.conv_shortcut.bias'] = checkpoint[f'{old_prefix}.skip_connection.bias']
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    # Split the fused qkv 1x1 conv into the q/k/v linear weights diffusers expects.
    weight_q, weight_k, weight_v = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3, dim=0)

    new_checkpoint[f'{new_prefix}.group_norm.weight'] = checkpoint[f'{old_prefix}.norm.weight']
    new_checkpoint[f'{new_prefix}.group_norm.bias'] = checkpoint[f'{old_prefix}.norm.bias']

    new_checkpoint[f'{new_prefix}.to_q.weight'] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_q.bias'] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_k.weight'] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_k.bias'] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_v.weight'] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_v.bias'] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f'{new_prefix}.to_out.0.weight'] = (
        checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f'{new_prefix}.to_out.0.bias'] = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    new_checkpoint = {}

    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']

    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']

    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'down_blocks.{i}.resnets.{j}'
                old_prefix = f'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'down_blocks.{i}.resnets.{j}'
                old_prefix = f'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f'down_blocks.{i}.attentions.{j}'
                old_prefix = f'input_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f'down_blocks.{i}.downsamplers.0'
            old_prefix = f'input_blocks.{current_layer}.0'
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config['up_block_types']

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'up_blocks.{i}.resnets.{j}'
                old_prefix = f'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f'up_blocks.{i}.upsamplers.0'
                old_prefix = f'output_blocks.{current_layer-1}.1'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'up_blocks.{i}.resnets.{j}'
                old_prefix = f'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f'up_blocks.{i}.attentions.{j}'
                old_prefix = f'output_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f'up_blocks.{i}.upsamplers.0'
                old_prefix = f'output_blocks.{current_layer-1}.2'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
args = parser.parse_args()
args.class_cond = str2bool(args.class_cond)

ckpt_name = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")

# Get U-Net config
if "imagenet64" in ckpt_name:
    unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
    unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
    unet_config = TEST_UNET_CONFIG
else:
    raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

if not args.class_cond:
    unet_config["num_class_embeds"] = None

converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
image_unet = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)

# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
    scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
    scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
    scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
    raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 69 |
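A hypothetical invocation of the converter (the script filename is an assumption; the flags come from the argument parser above, and the checkpoint basename drives the config selection):

python convert_consistency_to_diffusers.py \
    --unet_path ./cd_imagenet64_l2.pt \
    --dump_path ./consistency-model-converted \
    --class_cond True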
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 263 | 0 |
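The iterator-streamer pattern the tests above exercise, as a self-contained sketch (the tiny test checkpoint is the one the tests use; Hub access is assumed): generation runs in a background thread while the main thread consumes decoded text as it arrives.

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
inputs = tokenizer('Hello', return_tensors='pt')
streamer = TextIteratorStreamer(tokenizer)
generation_kwargs = dict(inputs, max_new_tokens=10, do_sample=False, streamer=streamer)
Thread(target=model.generate, kwargs=generation_kwargs).start()
text = ''.join(streamer)  # iterating blocks until generation finishes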
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input('enter the numbers of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))

    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())

    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f'the value at {value} is {summ}')


if __name__ == "__main__":
    main()
| 194 |
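A worked check of ucal (values chosen by hand, not from the source): for u = 0.5 and p = 3 the product is 0.5 * (0.5 - 1) * (0.5 - 2) = 0.375.

assert abs(ucal(0.5, 3) - 0.375) < 1e-12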
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError('math domain error')
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 263 | 0 |
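A quick sanity check (not part of the original module): Γ(n) = (n - 1)! for positive integers, so gamma(5.0) should be very close to 24.

assert abs(gamma(5.0) - 24.0) < 1e-6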
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowercase_ = logging.get_logger(__name__)
class Conversation:
    """
    Utility class containing a conversation and its history.
    """

    def __init__(self, text: str = None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
            output += f'{name} >> {text} \n'
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ', )
class ConversationalPipeline(Pipeline):
"""simple docstring"""
def __init__( self : Any,*lowercase_ : List[str],**lowercase_ : str )-> str:
'''simple docstring'''
super().__init__(*lowercase_,**lowercase_ )
if self.tokenizer.pad_token_id is None:
A__ = self.tokenizer.eos_token
def snake_case__ ( self : Any,lowercase_ : Tuple=None,lowercase_ : Optional[Any]=None,lowercase_ : Optional[int]=None,**lowercase_ : Optional[int] )-> Tuple:
'''simple docstring'''
A__ = {}
A__ = {}
A__ = {}
if min_length_for_response is not None:
A__ = min_length_for_response
if minimum_tokens is not None:
A__ = minimum_tokens
if "max_length" in generate_kwargs:
A__ = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
A__ = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowercase_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[Any],lowercase_ : List[str],lowercase_ : Optional[Any]=0,**lowercase_ : List[Any] )-> Optional[int]:
'''simple docstring'''
A__ = super().__call__(lowercase_,num_workers=lowercase_,**lowercase_ )
if isinstance(lowercase_,lowercase_ ) and len(lowercase_ ) == 1:
return outputs[0]
return outputs
def snake_case__ ( self : List[Any],lowercase_ : Optional[Any],lowercase_ : Tuple=3_2 )-> Dict[str, Any]:
'''simple docstring'''
if not isinstance(lowercase_,lowercase_ ):
raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer,'_build_conversation_input_ids' ):
A__ = self.tokenizer._build_conversation_input_ids(lowercase_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
A__ = self._legacy_parse_and_tokenize(lowercase_ )
if self.framework == "pt":
A__ = torch.LongTensor([input_ids] )
elif self.framework == "tf":
A__ = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def snake_case__ ( self : Union[str, Any],lowercase_ : str,lowercase_ : List[str]=1_0,**lowercase_ : Any )-> int:
'''simple docstring'''
A__ = generate_kwargs.get('max_length',self.model.config.max_length )
A__ = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
A__ = max_length - minimum_tokens
A__ = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
A__ = model_inputs['''attention_mask'''][:, -trim:]
A__ = model_inputs.pop('conversation' )
A__ = max_length
A__ = self.model.generate(**lowercase_,**lowercase_ )
if self.model.config.is_encoder_decoder:
A__ = 1
else:
A__ = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any],lowercase_ : Any=True )-> Union[str, Any]:
'''simple docstring'''
A__ = model_outputs['''output_ids''']
A__ = self.tokenizer.decode(
output_ids[0],skip_special_tokens=lowercase_,clean_up_tokenization_spaces=lowercase_,)
A__ = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(lowercase_ )
return conversation
def snake_case__ ( self : Tuple,lowercase_ : List[str] )-> Dict:
'''simple docstring'''
A__ = self.tokenizer.eos_token_id
A__ = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) )
if len(lowercase_ ) > self.tokenizer.model_max_length:
A__ = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 7 |
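A hedged usage sketch for the Conversation/ConversationalPipeline pair above; the model name is an illustrative assumption (any conversational checkpoint works) and Hub access is assumed.

from transformers import Conversation, pipeline

chatbot = pipeline('conversational', model='microsoft/DialoGPT-small')
conversation = Conversation("What's the best way to learn Python?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])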
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 0 |
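Example: 25 is 0b11001 and 32 is 0b100000; zero-filled to a common width, their positionwise OR is 0b111001 (decimal 57).

print(binary_or(25, 32))  # '0b111001'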
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowerCamelCase ( UpperCAmelCase_ : Dict="" ):
"""simple docstring"""
a :Optional[Any] = tempfile.mkdtemp()
return os.path.join(UpperCamelCase__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = torch.rand(12 , dtype=torch.floataa ) - 0.5
a :int = AgentAudio(_lowerCamelCase )
a :List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_lowerCamelCase , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(_lowerCamelCase ) )
# Ensure that the file contains the same value as the original tensor
a :List[str] = sf.read(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , torch.tensor(_lowerCamelCase ) , atol=1e-4 ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = torch.rand(12 , dtype=torch.floataa ) - 0.5
a :Optional[int] = get_new_path(suffix='''.wav''' )
sf.write(_lowerCamelCase , _lowerCamelCase , 1_6000 )
a :List[Any] = AgentAudio(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , _lowerCamelCase )
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = torch.randint(0 , 256 , (64, 64, 3) )
a :str = AgentImage(_lowerCamelCase )
a :Union[str, Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_lowerCamelCase , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
a :Dict = Image.open(_lowerCamelCase )
a :Optional[int] = AgentImage(_lowerCamelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
a :List[str] = Image.open(_lowerCamelCase )
a :Union[str, Any] = AgentImage(_lowerCamelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowerCamelCase ) )
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = '''Hey!'''
a :Any = AgentText(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , agent_type.to_string() )
self.assertEqual(_lowerCamelCase , agent_type.to_raw() )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
| 94 |
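The AgentText round trip the last test checks, as a two-line sketch (the import path matches the one used above):

from transformers.tools.agent_types import AgentText

text = AgentText('Hey!')
assert text.to_string() == text.to_raw() == 'Hey!'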
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
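The _LazyModule tail above is the standard transformers __init__.py idiom: nothing heavy is imported until an attribute is first accessed. A simplified sketch of the idea (an approximation, not the real implementation):

import importlib
import types


class SimpleLazyModule(types.ModuleType):
    """Defer submodule imports until an attribute is first touched."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f'{self.__name__}.{submodule}')
                return getattr(module, attr)
        raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')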
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
UpperCAmelCase_ = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
'''do_convert_rgb''': True,
}
UpperCAmelCase_ = os.path.join(self.tmpdirname , _snake_case)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(_snake_case , _snake_case)
def lowerCamelCase ( self : Dict , **_snake_case : Union[str, Any]):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : Dict , **_snake_case : List[Any]):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : Optional[int] , **_snake_case : Optional[Any]):
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1)) for x in image_inputs]
return image_inputs
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case)
processor_slow.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_snake_case)
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case)
processor_fast.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , _snake_case)
self.assertIsInstance(processor_fast.tokenizer , _snake_case)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , _snake_case)
self.assertIsInstance(processor_fast.image_processor , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_snake_case)
UpperCAmelCase_ = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_snake_case)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _snake_case)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_snake_case , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case)
UpperCAmelCase_ = '''Alexandra,T-shirt的价格是15便士。'''
UpperCAmelCase_ = processor(text=_snake_case)
UpperCAmelCase_ = tokenizer(_snake_case)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case)
UpperCAmelCase_ = '''Alexandra,T-shirt的价格是15便士。'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_snake_case , images=_snake_case)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(_snake_case):
processor()
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_snake_case)
UpperCAmelCase_ = tokenizer.batch_decode(_snake_case)
self.assertListEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = ChineseCLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case)
UpperCAmelCase_ = '''Alexandra,T-shirt的价格是15便士。'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_snake_case , images=_snake_case)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 51 |
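What the processor tests verify, as a hedged end-to-end sketch (the checkpoint name is an assumption about a public Chinese-CLIP model; Hub access is required): a single processor call yields both the text tensors and the pixel values.

import numpy as np
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained('OFA-Sys/chinese-clip-vit-base-patch16')
image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
inputs = processor(text='Alexandra,T-shirt的价格是15便士。', images=image, return_tensors='pt')
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids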
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
_UpperCAmelCase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
_UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Any = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 263 | 0 |
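The fairseq/SentencePiece alignment described in the comment block can be observed directly (needs the released checkpoint and sentencepiece installed):

from transformers import XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
print(tokenizer.convert_tokens_to_ids('<s>'))   # 0, from the fairseq-aligned table
print(tokenizer.convert_tokens_to_ids('▁de'))   # raw SentencePiece id shifted by the +1 offset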
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
if (force, area, distance).count(0) != 1:
raise ValueError("One and only one argument must be 0")
if force < 0:
raise ValueError("Magnitude of force can not be negative")
if distance < 0:
raise ValueError("Distance can not be negative")
if area < 0:
raise ValueError("Area can not be negative")
if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
return {"area": area}
elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149 |
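Example call (plate area and separation are illustrative values): passing force=0 asks the function to solve for the force from the other two quantities.

print(casimir_force(force=0, area=4, distance=0.0005))
# -> {'force': ...}, computed from area and distance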
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 263 | 0 |
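The same deprecation idiom, sketched generically with hypothetical class names: the old name stays importable but warns and delegates everything to its replacement.

import warnings


class NewProcessor:
    pass


class OldExtractor(NewProcessor):  # hypothetical names, for illustration only
    def __init__(self, *args, **kwargs):
        warnings.warn('OldExtractor is deprecated; use NewProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs)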
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
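# Usage sketch (illustrative comment, not part of the module): dummy classes like
# the ones above let `import transformers` succeed on machines without Flax, while
# failing loudly only when one of the Flax objects is actually used. For example,
# with a hypothetical class name:
#
#     class FlaxSomeModel(metaclass=DummyObject):
#         _backends = ["flax"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["flax"])
#
#     FlaxSomeModel()  # raises ImportError naming the missing "flax" backend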
| 259 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)

    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
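# Example invocation (illustrative only; the script file name and the checkpoint,
# metadata, and entity-vocab files from the original LUKE release are assumptions,
# they are not bundled with this code):
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path luke_base_500k.bin \
#       --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base \
#       --model_size base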
| 263 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.num_patches = num_patches
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
 | 297 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    # compile regexes and force a complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # Return True if the regexes in qts match any window of strings in tuple ks.
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}

    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."

    return freeze(unflatten_dict(result))
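# Illustrative, guarded demo (an assumption about how this module is consumed, not
# part of the original file): set_partitions maps every parameter path in a pytree
# to a PartitionSpec via the regex rules above. The toy tree below is hypothetical
# and uses small shapes so the demo stays cheap.
if __name__ == "__main__":
    import numpy as np

    params = {
        "transformer": {
            "wte": {"embedding": np.zeros((8, 4))},
            "ln_f": {"bias": np.zeros(4), "scale": np.ones(4)},
        }
    }
    specs = set_partitions(params)
    # Each leaf now carries either a PartitionSpec such as P("mp", None) or None.
    print(flatten_dict(specs))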
| 263 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
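# Usage sketch (an assumption about typical use, commented out because it downloads
# tokenizer files; not part of this module): the ONNX config drives dummy-input
# generation for export, with inputs ordered as forward() expects.
#
#     from transformers import AutoTokenizer
#
#     config = GPTJConfig()
#     onnx_config = GPTJOnnxConfig(config, task="default", use_past=False)
#     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#     dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     # dummy_inputs is an OrderedDict whose first key is "input_ids".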
| 91 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
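# Minimal usage sketch for the pipeline under test (commented out because the
# laion/clap-htsat-unfused checkpoint must be downloaded, hence the @slow marker
# above; `audio_array` stands for any 1-D numpy waveform, e.g. one drawn from esc50):
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     preds = classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
#     # preds is a list of {"score": float, "label": str} dicts, highest score first.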
| 263 | 0 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowerCAmelCase :Any = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
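# Illustrative use of the decorator factory above (`forward`, `model` and
# `input_ids` are hypothetical names, not part of this module): the same callable
# can be staged eagerly or as a (optionally XLA-compiled) graph, depending on the
# benchmark arguments.
#
#     @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#     def forward():
#         return model(input_ids, training=False)
#
# Note that do_eager_mode=True combined with use_xla=True raises a ValueError,
# since XLA compilation requires graph mode.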
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
 | 331 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
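# Minimal checkpointing sketch mirroring what the tests above exercise (commented
# out; assumes a script launched via `accelerate launch`, and `model`, `optimizer`,
# `dataloader` are stand-in names):
#
#     accelerator = Accelerator(
#         project_dir="runs", project_config=ProjectConfiguration(automatic_checkpoint_naming=True)
#     )
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     accelerator.save_state()  # writes runs/checkpoints/checkpoint_0
#     accelerator.load_state(os.path.join("runs", "checkpoints", "checkpoint_0"))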
| 263 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , _snake_case : str , ):
__lowercase : str = parent
__lowercase : Optional[int] = 13
__lowercase : List[str] = 7
__lowercase : Union[str, Any] = True
__lowercase : Any = True
__lowercase : Dict = False
__lowercase : int = True
__lowercase : Optional[int] = 99
__lowercase : Any = 32
__lowercase : Dict = 2
__lowercase : List[str] = 4
__lowercase : Optional[int] = 37
__lowercase : List[str] = '''gelu'''
__lowercase : int = 0.1
__lowercase : Optional[Any] = 0.1
__lowercase : Any = 512
__lowercase : Union[str, Any] = 16
__lowercase : Optional[int] = 2
__lowercase : List[Any] = 0.02
__lowercase : Dict = 3
__lowercase : Any = 4
__lowercase : Optional[int] = None
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : str = None
if self.use_input_mask:
__lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : str = None
__lowercase : Dict = None
__lowercase : Union[str, Any] = None
if self.use_labels:
__lowercase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : str = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self : Optional[Any] , _snake_case : str , _snake_case : Any , _snake_case : str , _snake_case : Any , _snake_case : Dict , _snake_case : Optional[Any] ):
__lowercase : Optional[Any] = TFDistilBertModel(config=_snake_case )
__lowercase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__lowercase : str = model(_snake_case )
__lowercase : Optional[Any] = [input_ids, input_mask]
__lowercase : Optional[int] = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self : Any , _snake_case : Dict , _snake_case : int , _snake_case : Optional[int] , _snake_case : str , _snake_case : Any , _snake_case : int ):
__lowercase : Dict = TFDistilBertForMaskedLM(config=_snake_case )
__lowercase : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__lowercase : List[str] = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self : Dict , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : int , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : List[str] ):
__lowercase : str = TFDistilBertForQuestionAnswering(config=_snake_case )
__lowercase : str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__lowercase : Dict = model(_snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self : str , _snake_case : int , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : str , _snake_case : Optional[int] ):
__lowercase : List[Any] = self.num_labels
__lowercase : List[Any] = TFDistilBertForSequenceClassification(_snake_case )
__lowercase : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__lowercase : Optional[Any] = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self : Union[str, Any] , _snake_case : str , _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : int ):
__lowercase : Union[str, Any] = self.num_choices
__lowercase : Optional[Any] = TFDistilBertForMultipleChoice(_snake_case )
__lowercase : str = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
__lowercase : int = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
__lowercase : Optional[Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__lowercase : Optional[int] = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case_ ( self : str , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : int ):
__lowercase : Union[str, Any] = self.num_labels
__lowercase : str = TFDistilBertForTokenClassification(_snake_case )
__lowercase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__lowercase : Any = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self : Optional[int] ):
__lowercase : Union[str, Any] = self.prepare_config_and_inputs()
(__lowercase) : Any = config_and_inputs
__lowercase : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
A__ : List[Any] = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A__ : Dict = False
A__ : Tuple = False
def snake_case_ ( self : int ):
__lowercase : List[str] = TFDistilBertModelTester(self )
__lowercase : int = ConfigTester(self , config_class=_snake_case , dim=37 )
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 156 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
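# Each try/except below probes an optional backend; the corresponding submodule
# is registered for lazy import only when that backend is actually installed.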
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 263 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 69 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 263 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
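# The stub above only keeps module-level references to `Image.open` importable
# when Pillow is absent; the tests themselves are still gated by @require_vision.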
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
    def test_small_model_tf(self):
        pass
| 194 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 263 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
| 7 |
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in the binary representation of a non-negative integer.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the loop won't run
        # 32 times, it will only run once per set bit.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
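    # quick sanity check: 25 == 0b11001, which has three set bits
    print(f"{get_set_bits_count(25) = }")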
| 263 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
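    # Note: scale() and unscale() are exact inverses of one another, so
    # unscale(scale(embeds)) recovers the original embeddings.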
| 94 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
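# The original LDM checkpoint stores the VQ-VAE ("first_stage_model.") and the
# diffusion UNet ("model.diffusion_model.") in one flat state_dict; the
# converter below splits it on those prefixes before building the pipeline.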
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 263 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 51 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
| 263 | 0 |
def solution(n: int = 100) -> int:
    # Project Euler 6: difference between the square of the sum and the
    # sum of the squares of the first n natural numbers.
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"{solution() = }")
| 149 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 263 | 0 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
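# floyd_warshall() below runs in O(v^3) time and O(v^2) space: every pair
# (i, j) is relaxed through each candidate intermediate vertex k in turn.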
def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 259 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight

            subgraph.add_edge(min_edge, min_weight)

        return subgraph
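# Project Euler 107: the maximum saving equals the total weight of the network
# minus the weight of its minimum spanning tree (found with Prim's algorithm above).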
def solution(filename: str = "p107_network.txt") -> int:
    script_directory: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_directory, filename)

    edges: dict[EdgeT, int] = {}
    data: list[str]

    with open(filepath) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer
if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 297 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 263 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 91 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
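# The game tree here is a complete binary tree with 2**height leaves, so the
# recursion above evaluates each leaf score exactly once.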
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 331 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 263 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
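# timm/DINO checkpoints fuse query, key and value into a single qkv projection;
# the slices above split it into the three separate matrices that the HF ViT
# implementation expects.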
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
args : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 156 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCAmelCase :Any = False
@skip_mps
class _UpperCAmelCase ( PipelineKarrasSchedulerTesterMixin ,PipelineLatentTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    '''simple docstring'''
    pipeline_class =StableDiffusionAttendAndExcitePipeline
    test_attention_slicing =False
    params =TEXT_TO_IMAGE_PARAMS
    batch_params =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
    image_params =TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params =TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass ( cls ) -> List[str]:
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass ( cls ) -> Union[str, Any]:
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , )
_UpperCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_UpperCAmelCase : List[str] = CLIPTextModel(A )
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def __lowerCAmelCase ( self , device , seed=0 ) -> List[Any]:
        if str(device ).startswith('''mps''' ):
            generator : Optional[int] = torch.manual_seed(seed )
        else:
            generator : Union[str, Any] = torch.Generator(device=device ).manual_seed(seed )
        inputs : List[str] = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
    def __lowerCAmelCase ( self ) -> int:
        device : List[str] = '''cpu'''
        components : Tuple = self.get_dummy_components()
        pipe : int = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs : Dict = self.get_dummy_inputs(device )
        image : Union[str, Any] = pipe(**inputs ).images
        image_slice : Tuple = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
        expected_slice : int = np.array(
            [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
        max_diff : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> str:
super().test_save_load_local(expected_max_difference=5E-4 )
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    @classmethod
    def setUpClass ( cls ) -> Union[str, Any]:
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass ( cls ) -> Optional[int]:
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def tearDown ( self ) -> List[str]:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = torch.manual_seed(5_1 )
_UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
_UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses'''
_UpperCAmelCase : int = [5, 7]
_UpperCAmelCase : Dict = pipe(
prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 263 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem( AbstractArchiveFileSystem ):
    root_marker = ""
    protocol = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression = None # compression type in fsspec. ex: "gzip"
    extension = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    def __init__( self, fo = "", target_protocol = None, target_options = None, **kwargs) -> Optional[Any]:
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo, mode='rb', protocol=target_protocol, compression=self.compression, client_kwargs={
                'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
                'trust_env': True, # Enable reading proxy env variables.
                **(target_options or {}).pop('client_kwargs', {}), # To avoid issues if it was already passed.
            }, **(target_options or {}), )
        self.compressed_name = os.path.basename(self.file.path.split('::')[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('.')]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
@classmethod
    def _strip_protocol( cls, lowerCAmelCase__) -> Tuple:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowerCAmelCase__).lstrip('/')
    def _get_dirs( self) -> Any:
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), '''name''': self.uncompressed_name}
            self.dir_cache = {f['''name''']: f}
    def cat( self, path) -> Tuple:
        return self.file.open().read()
    def _open( self, path, mode = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs, ) -> List[Any]:
        path = self._strip_protocol(path)
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'')
return self.file.open()
class Bz2FileSystem( BaseCompressedFileFileSystem ):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class GzipFileSystem( BaseCompressedFileFileSystem ):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class Lz4FileSystem( BaseCompressedFileFileSystem ):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class XzFileSystem( BaseCompressedFileFileSystem ):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem( BaseCompressedFileFileSystem ):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__( self, fo, mode = "rb", target_protocol = None, target_options = None, block_size = DEFAULT_BLOCK_SIZE, **kwargs, ) -> str:
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile :
            def __init__( self, file_) -> Optional[Any]:
                self._file = file_
def __enter__( self) -> List[str]:
self._file.__enter__()
return self
def __exit__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Union[str, Any]:
self._file.__exit__(*lowerCAmelCase__, **lowerCAmelCase__)
def __iter__( self) -> Any:
return iter(self._file)
            def __next__( self) -> List[str]:
return next(self._file)
def __getattr__( self, lowerCAmelCase__) -> List[Any]:
return getattr(self._file, lowerCAmelCase__)
def fixed_enter(*lowerCAmelCase__, **lowerCAmelCase__):
return WrappedFile(_enter(*lowerCAmelCase__, **lowerCAmelCase__))
        self.file.__enter__ = fixed_enter
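# Hedged usage sketch (the file name is hypothetical, and it assumes these classes are
# registered with fsspec under their protocol names, as the datasets library does):
# a compressed file then becomes readable through a chained URL, with "rb" forced
# internally and TextIOWrapper providing text mode.
def _demo_read_gzip():
    import gzip

    import fsspec

    with gzip.open("file.txt.gz", "wt") as f:  # create a small local gzip file
        f.write("hello")
    with fsspec.open("gzip://file.txt::file://file.txt.gz", "rt") as f:
        assert f.read() == "hello"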
| 69 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : List[str] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
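# Condensed usage sketch of the pattern the tests above exercise (the prompt text is
# illustrative, and torch is assumed available so AutoModelForCausalLM is imported):
# generation runs on a background thread while the main thread consumes decoded text
# from the iterator streamer as it becomes available.
def _demo_iterator_streamer():
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
    model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
    inputs = tokenizer('''hello''' , return_tensors='''pt''' )
    streamer = TextIteratorStreamer(tokenizer )
    generation_kwargs = {'''input_ids''': inputs.input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
    thread = Thread(target=model.generate , kwargs=generation_kwargs )
    thread.start()
    return ''''''.join(streamer )  # blocks until generation is finished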
| 263 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_a = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig( PretrainedConfig ):
    model_type = 'blip_text_model'
def __init__( self , __a=3_05_24 , __a=7_68 , __a=7_68 , __a=30_72 , __a=7_68 , __a=12 , __a=8 , __a=5_12 , __a="gelu" , __a=1e-12 , __a=0.0 , __a=0.0 , __a=0.02 , __a=3_05_22 , __a=2 , __a=0 , __a=1_02 , __a=True , __a=True , **__a , ) -> List[str]:
'''simple docstring'''
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = encoder_hidden_size
_UpperCamelCase = intermediate_size
_UpperCamelCase = projection_dim
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = hidden_act
_UpperCamelCase = initializer_range
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = is_decoder
_UpperCamelCase = use_cache
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__a)
_UpperCamelCase = cls.get_config_dict(__a , **__a)
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''') == "blip":
_UpperCamelCase = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(__a , **__a)
class BlipVisionConfig( PretrainedConfig ):
    model_type = 'blip_vision_model'
def __init__( self , __a=7_68 , __a=30_72 , __a=5_12 , __a=12 , __a=12 , __a=3_84 , __a=16 , __a="gelu" , __a=1e-5 , __a=0.0 , __a=1e-10 , **__a , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = hidden_size
_UpperCamelCase = intermediate_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = patch_size
_UpperCamelCase = image_size
_UpperCamelCase = initializer_range
_UpperCamelCase = attention_dropout
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = hidden_act
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__a)
_UpperCamelCase = cls.get_config_dict(__a , **__a)
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''') == "blip":
_UpperCamelCase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(__a , **__a)
class BlipConfig( PretrainedConfig ):
    model_type = 'blip'
    is_composition = True
def __init__( self , __a=None , __a=None , __a=5_12 , __a=2.6592 , __a=2_56 , **__a , ) -> Dict:
'''simple docstring'''
super().__init__(**__a)
if text_config is None:
_UpperCamelCase = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''')
if vision_config is None:
_UpperCamelCase = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''')
_UpperCamelCase = BlipTextConfig(**__a)
_UpperCamelCase = BlipVisionConfig(**__a)
_UpperCamelCase = self.vision_config.hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = logit_scale_init_value
_UpperCamelCase = 1.0
_UpperCamelCase = 0.02
_UpperCamelCase = image_text_hidden_size
@classmethod
def UpperCAmelCase ( cls , __a , __a , **__a) -> List[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__)
_UpperCamelCase = self.text_config.to_dict()
_UpperCamelCase = self.vision_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
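# Hedged sketch of composing the three configs above, using the standard transformers
# classes of the same shape (all hyperparameter values are the signature defaults):
def _demo_blip_config():
    from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

    config = BlipConfig.from_text_vision_configs(BlipTextConfig() , BlipVisionConfig() )
    # the composed config serializes with its own model_type plus both sub-configs
    assert config.to_dict()['''model_type'''] == '''blip'''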
| 194 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def lowerCamelCase_ (num : float ):
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(lowerCamelCase__ , 0 , inf , args=(num,) )[0]
def lowerCamelCase__ (x : float , z : float ):
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
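# Worked check for lowerCamelCase_ above: the integrand x**(z - 1) * exp(-x) integrated
# from 0 to infinity is the gamma function, and Gamma(n) == (n - 1)! for positive
# integers, so lowerCamelCase_(5.0) evaluates to approximately 4! == 24.0.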
| 263 | 0 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ) -> str:
'''simple docstring'''
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with FSDP.state_dict_type(
UpperCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
A__ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A__ = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
A__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A__ = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
A__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
logger.info(f'Saving model to {output_model_file}' )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A__ = os.path.join(UpperCamelCase__ , f'{MODEL_NAME}_{model_index}' )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
logger.info(f'Saving model to {ckpt_dir}' )
A__ = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=UpperCamelCase__ , storage_writer=dist_cp.FileSystemWriter(UpperCamelCase__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 ) -> List[Any]:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
UpperCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(UpperCamelCase__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
A__ = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
A__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
logger.info(f'Loading model from {input_model_file}' )
A__ = torch.load(UpperCamelCase__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A__ = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
A__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
logger.info(f'Loading model from {input_model_file}' )
A__ = torch.load(UpperCamelCase__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A__ = (
os.path.join(UpperCamelCase__ , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
A__ = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=UpperCamelCase__ , storage_reader=dist_cp.FileSystemReader(UpperCamelCase__ ) , planner=DefaultLoadPlanner() , )
A__ = state_dict['''model''']
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(UpperCamelCase__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ) -> List[Any]:
'''simple docstring'''
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with FSDP.state_dict_type(
UpperCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
A__ = FSDP.optim_state_dict(UpperCamelCase__ , UpperCamelCase__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
A__ = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
A__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
A__ = os.path.join(UpperCamelCase__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(UpperCamelCase__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ) -> Tuple:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
UpperCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A__ = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
A__ = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
A__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
A__ = torch.load(UpperCamelCase__ )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
A__ = (
os.path.join(UpperCamelCase__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
A__ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(UpperCamelCase__ ) , )
A__ = optim_state['''optimizer''']
logger.info(f'Optimizer loaded from {ckpt_dir}' )
A__ = FSDP.optim_state_dict_to_load(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
optimizer.load_state_dict(UpperCamelCase__ )
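# Minimal hedged sketch of the FULL_STATE_DICT path above (the helper name is made up,
# and in practice Accelerate drives these utilities for you; it also assumes the FSDP
# imports at the top of this file are available): wrapping state_dict() in
# FSDP.state_dict_type gathers full, unflattened parameters so rank 0 can write them
# with a plain torch.save.
def _demo_fsdp_full_state_dict(model):
    from torch.distributed.fsdp import FullStateDictConfig

    cfg = FullStateDictConfig(offload_to_cpu=True , rank0_only=True )
    with FSDP.state_dict_type(model , StateDictType.FULL_STATE_DICT , cfg ):
        return model.state_dict()  # materialized in full on rank 0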
| 7 |
"""simple docstring"""
def lowerCamelCase_ (a : int , b : int ):
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:] # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
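# Worked example for lowerCamelCase_ above: with a = 25 (0b11001) and b = 32 (0b100000),
# zero-filling to six digits gives 011001 and 100000; a result digit is '1' wherever
# either input digit is '1', yielding "0b111001" (decimal 57).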
| 263 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available( ):
    """simple docstring"""
    smp_options :Optional[Any] = os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options :Optional[Any] = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options :Optional[int] = os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' )
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options :int = json.loads(mpi_options )
        if not mpi_options.get('''sagemaker_mpi_enabled''' , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('''smdistributed''' ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _snake_case ( TrainingArguments ):
SCREAMING_SNAKE_CASE__ = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def SCREAMING_SNAKE_CASE__ ( self ):
super().__post_init__()
warnings.warn(
'''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
'''`TrainingArguments` instead.''' , _lowerCamelCase , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
logger.info('''PyTorch: setting up devices''' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'''torch.distributed process group is initialized, but local_rank == -1. '''
'''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' )
if self.no_cuda:
a :Dict = torch.device('''cpu''' )
a :Union[str, Any] = 0
elif is_sagemaker_model_parallel_available():
a :Optional[Any] = smp.local_rank()
a :str = torch.device('''cuda''' , _lowerCamelCase )
a :Optional[int] = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta )
a :Optional[int] = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) )
a :List[str] = torch.device('''cuda''' , self.local_rank )
a :Optional[Any] = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
a :List[str] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
a :List[str] = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta )
a :str = torch.device('''cuda''' , self.local_rank )
a :Union[str, Any] = 1
if device.type == "cuda":
torch.cuda.set_device(_lowerCamelCase )
return device
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return False
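# Illustrative sketch of the environment contract checked above (the values are made
# up): SageMaker exposes SM_HP_MP_PARAMETERS and SM_FRAMEWORK_PARAMS as JSON strings,
# and model parallelism is reported available only when "partitions" is present,
# MPI is enabled, and the smdistributed package is importable.
def _demo_smp_env():
    os.environ['''SM_HP_MP_PARAMETERS'''] = json.dumps({'''partitions''': 2} )
    os.environ['''SM_FRAMEWORK_PARAMS'''] = json.dumps({'''sagemaker_mpi_enabled''': True} )
    return is_sagemaker_model_parallel_available()  # now gated only on smdistributed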
| 94 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
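# Hedged usage note: with _LazyModule in place, importing this package stays cheap;
# e.g. `from transformers import ViTMSNModel` only triggers the heavy torch-backed
# module import on first attribute access, and a missing torch surfaces through the
# OptionalDependencyNotAvailable handling above instead of at package import time.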
| 263 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : Union[str, Any] = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __snake_case ( PretrainedConfig ):
    model_type = '''wavlm'''
def __init__( self : Dict , _snake_case : Dict=32 , _snake_case : str=768 , _snake_case : Dict=12 , _snake_case : int=12 , _snake_case : Dict=3072 , _snake_case : str="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : List[str]=0.1 , _snake_case : List[Any]=0.1 , _snake_case : List[str]=0.0 , _snake_case : List[str]=0.1 , _snake_case : Optional[int]=0.1 , _snake_case : Tuple=0.0_2 , _snake_case : Dict=1e-5 , _snake_case : Any="group" , _snake_case : Any="gelu" , _snake_case : Tuple=(512, 512, 512, 512, 512, 512, 512) , _snake_case : List[Any]=(5, 2, 2, 2, 2, 2, 2) , _snake_case : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , _snake_case : Tuple=False , _snake_case : int=128 , _snake_case : Dict=16 , _snake_case : Tuple=320 , _snake_case : List[str]=800 , _snake_case : List[Any]=False , _snake_case : List[Any]=True , _snake_case : Any=0.0_5 , _snake_case : int=10 , _snake_case : Dict=2 , _snake_case : int=0.0 , _snake_case : Optional[int]=10 , _snake_case : Dict=320 , _snake_case : int=2 , _snake_case : Union[str, Any]=0.1 , _snake_case : List[Any]=100 , _snake_case : Optional[int]=256 , _snake_case : Optional[int]=256 , _snake_case : Union[str, Any]=0.1 , _snake_case : Optional[Any]="mean" , _snake_case : Dict=False , _snake_case : Tuple=False , _snake_case : str=256 , _snake_case : List[Any]=(512, 512, 512, 512, 1500) , _snake_case : Tuple=(5, 3, 3, 1, 1) , _snake_case : Dict=(1, 2, 3, 1, 1) , _snake_case : Tuple=512 , _snake_case : Dict=80 , _snake_case : Union[str, Any]=0 , _snake_case : int=1 , _snake_case : List[Any]=2 , _snake_case : int=False , _snake_case : Union[str, Any]=3 , _snake_case : List[str]=2 , _snake_case : List[str]=3 , _snake_case : List[str]=None , **_snake_case : int , ):
"""simple docstring"""
super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case)
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = feat_extract_norm
UpperCAmelCase_ = feat_extract_activation
UpperCAmelCase_ = list(_snake_case)
UpperCAmelCase_ = list(_snake_case)
UpperCAmelCase_ = list(_snake_case)
UpperCAmelCase_ = conv_bias
UpperCAmelCase_ = num_buckets
UpperCAmelCase_ = max_bucket_distance
UpperCAmelCase_ = num_conv_pos_embeddings
UpperCAmelCase_ = num_conv_pos_embedding_groups
UpperCAmelCase_ = len(self.conv_dim)
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = feat_proj_dropout
UpperCAmelCase_ = final_dropout
UpperCAmelCase_ = layerdrop
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_ctc_classes
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = do_stable_layer_norm
UpperCAmelCase_ = use_weighted_layer_sum
UpperCAmelCase_ = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ = apply_spec_augment
UpperCAmelCase_ = mask_time_prob
UpperCAmelCase_ = mask_time_length
UpperCAmelCase_ = mask_time_min_masks
UpperCAmelCase_ = mask_feature_prob
UpperCAmelCase_ = mask_feature_length
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ = num_codevectors_per_group
UpperCAmelCase_ = num_codevector_groups
UpperCAmelCase_ = contrastive_logits_temperature
UpperCAmelCase_ = num_negatives
UpperCAmelCase_ = codevector_dim
UpperCAmelCase_ = proj_codevector_dim
UpperCAmelCase_ = diversity_loss_weight
# ctc loss
UpperCAmelCase_ = ctc_loss_reduction
UpperCAmelCase_ = ctc_zero_infinity
# adapter
UpperCAmelCase_ = add_adapter
UpperCAmelCase_ = adapter_kernel_size
UpperCAmelCase_ = adapter_stride
UpperCAmelCase_ = num_adapter_layers
UpperCAmelCase_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ = list(_snake_case)
UpperCAmelCase_ = list(_snake_case)
UpperCAmelCase_ = list(_snake_case)
UpperCAmelCase_ = xvector_output_dim
@property
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1)
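# Worked example for the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the product is 5 * 2**6 == 320, i.e. the feature encoder
# emits one frame per 320 input samples (20 ms of 16 kHz audio).
assert functools.reduce(operator.mul , (5, 2, 2, 2, 2, 2, 2) , 1) == 320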
| 51 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class _UpperCAmelCase ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names =VOCAB_FILES_NAMES
    pretrained_vocab_files_map =PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names =['''input_ids''', '''attention_mask''']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
_UpperCAmelCase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
_UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Any = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , A ) -> int:
_UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
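# Hedged sketch of the fairseq/spm alignment documented above (the probe piece "▁de"
# comes from the alignment table in the comments and is assumed to be in the
# vocabulary): ordinary pieces are shifted by fairseq_offset (= 1), while the four
# specials are pinned to fixed ids.
def _demo_fairseq_alignment(tokenizer):
    assert tokenizer.convert_tokens_to_ids('''<pad>''' ) == 1
    spm_id = tokenizer.sp_model.PieceToId('''▁de''' )
    assert tokenizer.convert_tokens_to_ids('''▁de''' ) == spm_id + tokenizer.fairseq_offset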
| 263 | 0 |
def solution( ):
    return [
        a * b * (10_00 - a - b)
        for a in range(1 ,9_99)
        for b in range(a ,9_99)
        if (a * a + b * b == (10_00 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f"{solution() = }")
| 149 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _UpperCAmelCase ( DonutImageProcessor ):
'''simple docstring'''
def __init__( self , *A , **A ) -> None:
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 263 | 0 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class UpperCAmelCase_ ( BaseTokenizer ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ = "▁" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = "<unk>" , SCREAMING_SNAKE_CASE_ = "</s>" , SCREAMING_SNAKE_CASE_ = "<pad>" , ) -> List[Any]:
UpperCamelCase :List[Any] = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
UpperCamelCase :List[str] = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
UpperCamelCase :Tuple = token_dict['''token''']
UpperCamelCase :Union[str, Any] = Tokenizer(Unigram() )
UpperCamelCase :Optional[Any] = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ),
normalizers.Lowercase(),
] )
UpperCamelCase :Any = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ),
pre_tokenizers.Digits(individual_digits=SCREAMING_SNAKE_CASE_ ),
pre_tokenizers.Punctuation(),
] )
UpperCamelCase :Optional[int] = decoders.Metaspace(replacement=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , )
UpperCamelCase :Optional[Any] = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 8000 , SCREAMING_SNAKE_CASE_ = True , ) -> Optional[int]:
UpperCamelCase :Tuple = trainers.UnigramTrainer(
vocab_size=SCREAMING_SNAKE_CASE_ , special_tokens=self.special_tokens_list , show_progress=SCREAMING_SNAKE_CASE_ , )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :List[Any] = [files]
self._tokenizer.train(SCREAMING_SNAKE_CASE_ , trainer=SCREAMING_SNAKE_CASE_ )
self.add_unk_id()
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 8000 , SCREAMING_SNAKE_CASE_ = True , ) -> List[Any]:
UpperCamelCase :Dict = trainers.UnigramTrainer(
vocab_size=SCREAMING_SNAKE_CASE_ , special_tokens=self.special_tokens_list , show_progress=SCREAMING_SNAKE_CASE_ , )
self._tokenizer.train_from_iterator(SCREAMING_SNAKE_CASE_ , trainer=SCREAMING_SNAKE_CASE_ )
self.add_unk_id()
    def add_unk_id ( self ) -> str:
        tokenizer_json :Optional[int] = json.loads(self._tokenizer.to_str() )
        tokenizer_json['''model''']['''unk_id'''] = self.special_tokens['''unk''']['''id''']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
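# Hedged, self-contained sketch of the normalizer/pre-tokenizer stack built in
# __init__ above, applied directly with the tokenizers API (the sample text is made
# up and the expected pieces are approximate):
def _demo_pretokenize():
    norm = normalizers.Sequence(
        [
            normalizers.Nmt(),
            normalizers.NFKC(),
            normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ),
            normalizers.Lowercase(),
        ] )
    pre = pre_tokenizers.Sequence(
        [
            pre_tokenizers.Metaspace(replacement='''▁''' , add_prefix_space=True ),
            pre_tokenizers.Digits(individual_digits=True ),
            pre_tokenizers.Punctuation(),
        ] )
    pieces = pre.pre_tokenize_str(norm.normalize_str('''Hello  WORLD 42!''' ) )
    return [piece for piece, _ in pieces]  # roughly ['▁hello', '▁world', '▁', '4', '2', '!']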
| 259 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint (checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
# Load configuration defined in the metadata file
with open(UpperCamelCase__ ) as metadata_file:
_UpperCAmelCase : Dict = json.load(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# Load the entity vocab file
_UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
_UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
_UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
_UpperCAmelCase : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']]
_UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval()
_UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
_UpperCAmelCase : List[str] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCAmelCase : Dict = (39, 42)
_UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' )
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
_UpperCAmelCase : str = torch.Size((1, 42, 1024) )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) )
_UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCAmelCase : int = torch.Size((1, 1, 1024) )
_UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCAmelCase : List[str] = torch.Size((1, 1, 768) )
_UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Any = {}
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(UpperCamelCase__ ):
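            # Each TSV line has the form "<entity title>\t<count>"; the running line index becomes the entity id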
_UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' )
_UpperCAmelCase : Tuple = index
return entity_vocab
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowerCAmelCase :Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 263 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class a__:
def __init__( self : List[Any] , __snake_case : Optional[int] = 6 ):
a : Node | None = None
a : Node | None = None
self.create_linked_list(__snake_case )
def lowercase_ ( self : Optional[Any] , __snake_case : str ):
a : int = Node()
a : Any = current_node
a : Optional[Any] = current_node
a : Tuple = current_node
for _ in range(1 , __snake_case ):
a : List[str] = Node()
a : List[str] = current_node
a : List[str] = previous_node
a : Dict = current_node
a : int = self.front
a : Optional[int] = previous_node
def lowercase_ ( self : Dict ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def lowercase_ ( self : Tuple ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def lowercase_ ( self : Dict , __snake_case : List[str] ):
if self.rear is None:
return
self.check_is_full()
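        # Advance the rear pointer only when the queue already holds data, then write the payload into the rear slot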
if not self.is_empty():
a : int = self.rear.next
if self.rear:
a : Dict = data
def lowercase_ ( self : int ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
a : int = self.front.data
a : List[Any] = None
return data
a : Optional[Any] = self.front
a : Dict = old_front.next
a : Dict = old_front.data
a : List[str] = None
return data
def lowercase_ ( self : List[str] ):
if self.is_empty():
raise Exception('Empty Queue' )
def lowercase_ ( self : Any ):
if self.rear and self.rear.next == self.front:
raise Exception('Full Queue' )
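# Node of the underlying doubly linked list: a payload plus links to its two neighbours.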
class a__:
def __init__( self : str ):
a : Any | None = None
a : Node | None = None
a : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod() | 297 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowerCAmelCase :str = object()
# For specifying empty leaf dict `{}`
_lowerCAmelCase :str = object()
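# Returns True when the regex tuple qs matches a contiguous window of the key tuple ks.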
def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ):
_UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def lowerCamelCase_ ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
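# Builds a frozen pytree of PartitionSpecs covering every parameter; the assert below guarantees no leaf is left unmatched.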
def lowerCamelCase_ (UpperCamelCase__ : str ):
_UpperCAmelCase : List[str] = _get_partition_rules()
_UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
| 263 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "ibert"
def __init__( self : Union[str, Any] , lowercase_ : int=30522 , lowercase_ : str=768 , lowercase_ : Union[str, Any]=12 , lowercase_ : Any=12 , lowercase_ : Optional[Any]=3072 , lowercase_ : int="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Dict=512 , lowercase_ : Any=2 , lowercase_ : str=0.02 , lowercase_ : Tuple=1e-12 , lowercase_ : List[Any]=1 , lowercase_ : Dict=0 , lowercase_ : Optional[int]=2 , lowercase_ : List[Any]="absolute" , lowercase_ : int=False , lowercase_ : str="none" , **lowercase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Any = type_vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Optional[int] = position_embedding_type
SCREAMING_SNAKE_CASE_ : Optional[int] = quant_mode
SCREAMING_SNAKE_CASE_ : List[str] = force_dequant
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE_ : List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
| 91 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
] , )
_UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
_UpperCAmelCase : Tuple = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
| 263 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase :Any = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , _A : Dict , _A : str=7 , _A : Optional[Any]=3 , _A : List[str]=18 , _A : List[str]=30 , _A : Tuple=400 , _A : Tuple=None , _A : Tuple=True , _A : Dict=True , _A : Dict=None , ) -> List[Any]:
__magic_name__ : Any = size if size is not None else {'''height''': 20, '''width''': 20}
__magic_name__ : int = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = num_channels
__magic_name__ : Dict = image_size
__magic_name__ : List[str] = min_resolution
__magic_name__ : List[str] = max_resolution
__magic_name__ : str = size
__magic_name__ : Dict = do_normalize
__magic_name__ : Dict = do_convert_rgb
__magic_name__ : Any = [512, 1024, 2048, 4096]
__magic_name__ : Dict = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def __lowerCAmelCase ( self : str ) -> Any:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : str = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
__magic_name__ : Any = Image.open(requests.get(_A , stream=_A ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Any = PixaStructImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ : Optional[Any] = PixaStructImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'do_convert_rgb' ) )
def __lowerCAmelCase ( self : List[Any] ) -> Any:
__magic_name__ : int = self.image_processor_tester.prepare_dummy_image()
__magic_name__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
__magic_name__ : Any = 2048
__magic_name__ : Optional[Any] = image_processor(_A , return_tensors='pt' , max_patches=_A )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processor
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__magic_name__ : Dict = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
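        # A flattened patch stores patch_h * patch_w * channels pixel values plus two leading
        # positional features (the patch's row and column index), hence the "+ 2"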
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__magic_name__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ : List[Any] = image_processor(
_A , return_tensors='pt' , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processor
__magic_name__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__magic_name__ : Optional[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__magic_name__ : Optional[Any] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_A ):
__magic_name__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_A ).flattened_patches
__magic_name__ : Union[str, Any] = '''Hello'''
__magic_name__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_A , header_text=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ : int = image_processor(
_A , return_tensors='pt' , max_patches=_A , header_text=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self : List[str] ) -> str:
# Initialize image_processor
__magic_name__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
__magic_name__ : Tuple = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__magic_name__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ : Tuple = image_processor(
_A , return_tensors='pt' , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
# Initialize image_processor
__magic_name__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__magic_name__ : str = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__magic_name__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ : Union[str, Any] = image_processor(
_A , return_tensors='pt' , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
__magic_name__ : Any = PixaStructImageProcessingTester(self , num_channels=4 )
__magic_name__ : Optional[Any] = 3
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : Dict ) -> List[str]:
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'do_convert_rgb' ) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
# Initialize image_processor
__magic_name__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__magic_name__ : Optional[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__magic_name__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__magic_name__ : Any = image_processor(
_A , return_tensors='pt' , max_patches=_A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) | 331 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def lowerCamelCase_ (UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 2 ):
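    # Builds synthetic linear-regression data, y = a*x + b plus Gaussian noise, and wraps it in train/valid DataLoaders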
def get_dataset(UpperCamelCase__ : List[str] ):
_UpperCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(UpperCamelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=None ):
_UpperCAmelCase : Tuple = []
for epoch in range(UpperCamelCase__ ):
# Train quickly
model.train()
for batch in dataloader:
_UpperCAmelCase , _UpperCAmelCase : Dict = batch
_UpperCAmelCase : int = model(UpperCamelCase__ )
_UpperCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , UpperCamelCase__ )
accelerator.backward(UpperCamelCase__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> List[Any]:
super().__init__()
_UpperCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) )
_UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) )
def __lowerCAmelCase ( self , A ) -> Tuple:
return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
        # Save 11 checkpoints; with total_limit=2 only the two most recent should survive
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 263 | 0 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Any:
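    # Fairseq bookkeeping tensors with no Hugging Face counterpart; they are popped before loading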
__lowercase : List[str] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Tuple:
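    # The popped entries are meant to be re-inserted under renamed keys (in the upstream script,
    # "transformer_layers" -> "layers" and "subsample" -> "conv") so they match the HF module tree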
__lowercase : Optional[int] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
__lowercase : Any = s_dict.pop(UpperCamelCase__ )
elif "subsample" in key:
__lowercase : str = s_dict.pop(UpperCamelCase__ )
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Dict:
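    # Turns the decoder token-embedding matrix into an output-projection linear layer (weight tying)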
__lowercase : Optional[Any] = emb.weight.shape
__lowercase : Optional[int] = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
__lowercase : Union[str, Any] = emb.weight.data
return lin_layer
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
__lowercase : Dict = torch.load(UpperCamelCase__ , map_location='''cpu''' )
__lowercase : Optional[int] = mam_aaa['''args''']
__lowercase : Dict = mam_aaa['''model''']
__lowercase : Any = state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(UpperCamelCase__ )
rename_keys(UpperCamelCase__ )
__lowercase : str = state_dict['''decoder.embed_tokens.weight'''].shape[0]
__lowercase : Optional[int] = args.share_decoder_input_output_embed
__lowercase : Any = [int(UpperCamelCase__ ) for i in args.conv_kernel_sizes.split(''',''' )]
__lowercase : Dict = SpeechaTextConfig(
vocab_size=UpperCamelCase__ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(UpperCamelCase__ ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase__ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase__ , num_beams=5 , max_length=200 , use_cache=UpperCamelCase__ , decoder_start_token_id=2 , early_stopping=UpperCamelCase__ , )
__lowercase : Tuple = SpeechaTextForConditionalGeneration(UpperCamelCase__ )
__lowercase : Optional[Any] = model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0 and not set(UpperCamelCase__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
F' but all the following weights are missing {missing}' )
if tie_embeds:
__lowercase : List[Any] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
__lowercase : Any = lm_head_weights
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
__lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 156 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :str = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> None:
warnings.warn(
'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.', FutureWarning, )
super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
| 69 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
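# Each optional backend is probed lazily below: a missing dependency simply leaves the
# corresponding symbols out of the import structure instead of failing at import time.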
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__ ( ) -> Any:
"""simple docstring"""
_UpperCamelCase = ArgumentParser('''Transformers CLI tool''', usage='''transformers-cli <command> [<args>]''' )
_UpperCamelCase = parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(UpperCamelCase__ )
DownloadCommand.register_subcommand(UpperCamelCase__ )
EnvironmentCommand.register_subcommand(UpperCamelCase__ )
RunCommand.register_subcommand(UpperCamelCase__ )
ServeCommand.register_subcommand(UpperCamelCase__ )
UserCommands.register_subcommand(UpperCamelCase__ )
AddNewModelCommand.register_subcommand(UpperCamelCase__ )
AddNewModelLikeCommand.register_subcommand(UpperCamelCase__ )
LfsCommands.register_subcommand(UpperCamelCase__ )
PTtoTFCommand.register_subcommand(UpperCamelCase__ )
# Let's go
_UpperCamelCase = parser.parse_args()
if not hasattr(UpperCamelCase__, '''func''' ):
parser.print_help()
exit(1 )
# Run
_UpperCamelCase = args.func(UpperCamelCase__ )
service.run()
if __name__ == "__main__":
main()
| 194 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =IFImgaImgSuperResolutionPipeline
a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
a__ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __lowerCAmelCase ( self ) -> List[str]:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Any = torch.manual_seed(A )
else:
_UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 263 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A ( _UpperCAmelCase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def snake_case__ ( lowercase_ : Optional[Any] )-> List[str]:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def snake_case__ ( self : List[str] )-> List[str]:
'''simple docstring'''
raise NotImplementedError()
| 7 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or number < 0:
raise ValueError('''Input must be a non-negative integer''' )
_UpperCAmelCase : str = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
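# Example: for 11 (0b1011) the loop clears the lowest set bit three times
# (0b1010 -> 0b1000 -> 0b0000), so the function returns 3.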
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=None , ):
a :Any = size if size is not None else {'''shortest_edge''': 18}
a :Optional[int] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
a :Tuple = parent
a :str = batch_size
a :Any = num_channels
a :Optional[Any] = num_frames
a :Dict = image_size
a :List[Any] = min_resolution
a :Dict = max_resolution
a :Any = do_resize
a :Tuple = size
a :Tuple = do_normalize
a :Optional[int] = image_mean
a :int = image_std
a :Dict = crop_size
def SCREAMING_SNAKE_CASE__ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _snake_case ( _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = VivitImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = VivitImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
a :str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
a :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
a :Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
a :List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a :List[str] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
a :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a :Any = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
a :List[Any] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a :str = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
a :int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a :Dict = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
a :Optional[int] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a :Any = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 94 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
_UpperCAmelCase : int = OmegaConf.load(UpperCamelCase__ )
_UpperCAmelCase : str = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model''']
_UpperCAmelCase : Optional[Any] = list(state_dict.keys() )
# extract state_dict for VQVAE
_UpperCAmelCase : Any = {}
_UpperCAmelCase : Any = '''first_stage_model.'''
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase : Dict = state_dict[key]
# extract state_dict for UNetLDM
_UpperCAmelCase : Tuple = {}
_UpperCAmelCase : int = '''model.diffusion_model.'''
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase : Dict = state_dict[key]
_UpperCAmelCase : List[str] = config.model.params.first_stage_config.params
_UpperCAmelCase : Union[str, Any] = config.model.params.unet_config.params
_UpperCAmelCase : Any = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : int = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_UpperCAmelCase : Optional[Any] = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
_lowerCAmelCase :List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 263 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 51 |
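Configs built on `PretrainedConfig`, like the MGP-STR one above, round-trip through the inherited `to_dict`/`from_dict` machinery; a small usage sketch, assuming the class is importable from `transformers`:

from transformers import MgpstrConfig

config = MgpstrConfig(max_token_length=27, num_character_labels=38)
assert config.model_type == "mgp-str"
restored = MgpstrConfig.from_dict(config.to_dict())
assert restored.max_token_length == 27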
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
    'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
| 263 | 0 |
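The two derived properties of the Falcon config above are easy to check directly; a sketch assuming the class is importable from `transformers`:

from transformers import FalconConfig

config = FalconConfig(hidden_size=4544, num_attention_heads=71, alibi=False)
assert config.head_dim == 4544 // 71  # 64, the per-head width
assert config.rotary is True  # rotary embeddings are used exactly when ALiBi is off
assert FalconConfig(alibi=True).rotary is False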
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
SPIECE_UNDERLINE = '▁'
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BigBird tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 149 |
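The three mask-building methods above are pure list arithmetic, so their layouts can be verified without a tokenizer; the token ids below are made up:

cls_id, sep_id = 65, 66  # made-up special-token ids
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]

# build_inputs_with_special_tokens: [CLS] x [SEP] for one sequence,
# [CLS] x [SEP] y [SEP] for a pair.
pair = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]

# create_token_type_ids_from_sequences: 0s cover the first segment and its
# [SEP], 1s cover the second segment and its [SEP].
type_ids = len([cls_id] + token_ids_0 + [sep_id]) * [0] + len(token_ids_1 + [sep_id]) * [1]
assert type_ids == [0, 0, 0, 0, 0, 1, 1, 1]
assert len(type_ids) == len(pair)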
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']

OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 263 | 0 |
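The whole conversion above hinges on a single `dict.pop` rename; a toy illustration with ints standing in for tensors:

d = {"lm_head.decoder.weight": 1, "transformer.h.0.attn.weight": 2}
d["lm_head.weight"] = d.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in d
assert d["lm_head.weight"] == 1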
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : List[str] =['input_values', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_6000 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 80 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 64 , SCREAMING_SNAKE_CASE_ = "hann_window" , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 80 , SCREAMING_SNAKE_CASE_ = 7600 , SCREAMING_SNAKE_CASE_ = 1e-10 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ) -> Any:
super().__init__(feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = do_normalize
UpperCamelCase :Optional[int] = return_attention_mask
UpperCamelCase :List[str] = num_mel_bins
UpperCamelCase :str = hop_length
UpperCamelCase :Dict = win_length
UpperCamelCase :Tuple = win_function
UpperCamelCase :Optional[int] = frame_signal_scale
UpperCamelCase :Union[str, Any] = fmin
UpperCamelCase :int = fmax
UpperCamelCase :Any = mel_floor
UpperCamelCase :Any = reduction_factor
UpperCamelCase :List[Any] = win_length * sampling_rate // 1000
UpperCamelCase :Any = hop_length * sampling_rate // 1000
UpperCamelCase :Union[str, Any] = optimal_fft_length(self.sample_size )
UpperCamelCase :List[Any] = (self.n_fft // 2) + 1
UpperCamelCase :Optional[int] = window_function(window_length=self.sample_size , name=self.win_function , periodic=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , SCREAMING_SNAKE_CASE_ , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , SCREAMING_SNAKE_CASE_ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
UpperCamelCase :Optional[Any] = np.array(SCREAMING_SNAKE_CASE_ , np.intaa )
UpperCamelCase :Optional[int] = []
for vector, length in zip(SCREAMING_SNAKE_CASE_ , attention_mask.sum(-1 ) ):
UpperCamelCase :Tuple = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCamelCase :Union[str, Any] = padding_value
normed_input_values.append(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase :List[str] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , ) -> np.ndarray:
UpperCamelCase :Any = spectrogram(
SCREAMING_SNAKE_CASE_ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
def __call__( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
UpperCamelCase :Optional[int] = self._process_audio(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
else:
UpperCamelCase :Optional[Any] = None
if audio_target is not None:
UpperCamelCase :Optional[Any] = self._process_audio(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
if inputs is None:
return inputs_target
else:
UpperCamelCase :Optional[Any] = inputs_target['''input_values''']
UpperCamelCase :str = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
UpperCamelCase :Optional[int] = decoder_attention_mask
return inputs
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> BatchFeature:
UpperCamelCase :int = isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
UpperCamelCase :Optional[Any] = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase :Optional[Any] = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
UpperCamelCase :List[Any] = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
UpperCamelCase :Tuple = speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase :Union[str, Any] = [speech]
# needed to make pad() work on spectrogram inputs
UpperCamelCase :Optional[Any] = self.feature_size
# convert into correct format for padding
if is_target:
UpperCamelCase :List[Any] = [self._extract_mel_features(SCREAMING_SNAKE_CASE_ ) for waveform in speech]
UpperCamelCase :Optional[Any] = BatchFeature({'''input_values''': features} )
UpperCamelCase :Any = self.num_mel_bins
else:
UpperCamelCase :Union[str, Any] = BatchFeature({'''input_values''': speech} )
UpperCamelCase :Tuple = self.pad(
SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :Optional[Any] = feature_size_hack
# convert input values to correct format
UpperCamelCase :Optional[int] = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
UpperCamelCase :int = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
UpperCamelCase :Optional[Any] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
UpperCamelCase :int = input_values.astype(np.floataa )
# convert attention_mask to correct format
UpperCamelCase :Union[str, Any] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
UpperCamelCase :List[Any] = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
UpperCamelCase :Optional[int] = (
attention_mask
if self._get_padding_strategies(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCamelCase :str = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=SCREAMING_SNAKE_CASE_ , padding_value=self.padding_value )
if return_tensors is not None:
UpperCamelCase :Optional[Any] = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ )
return padded_inputs
def UpperCAmelCase ( self ) -> Dict[str, Any]:
UpperCamelCase :List[Any] = super().to_dict()
# Don't serialize these as they are derived from the other properties.
UpperCamelCase :Tuple = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
| 259 |
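The zero-mean unit-variance normalization applied by the feature extractor above reduces, for a single unpadded vector, to a short numpy expression; this sketch omits the attention-mask handling:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
assert abs(normed.mean()) < 1e-6
assert abs(normed.std() - 1.0) < 1e-3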
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """A weighted undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        # normalise each edge so the smaller vertex comes first
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new (or updated) edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}

    with open(filepath) as f:
        data: list[str] = f.read().strip().split('\n')

    adjacency_matrix = [line.split(',') for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    # the saving from using the minimal network rather than the full one
    return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 0 |
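A tiny worked example of the `Graph`/`prims_algorithm` API above, assuming the class is importable: for a triangle, the minimum spanning tree drops the heaviest edge.

triangle = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 5})
mst = triangle.prims_algorithm()
assert sum(mst.edges.values()) == 3  # keeps the weight-1 and weight-2 edges
assert sum(triangle.edges.values()) - sum(mst.edges.values()) == 5  # the saving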
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncating toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod() | 297 |
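Usage sketch for the evaluator above: tokens arrive as strings and division truncates toward zero.

assert evaluate_postfix(["2", "3", "+", "4", "*"]) == 20  # (2 + 3) * 4
assert evaluate_postfix(["-7", "2", "/"]) == -3  # truncation toward zero, not floor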
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''mgp-str'''
def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]:
super().__init__(**A )
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Dict = max_token_length
_UpperCAmelCase : Optional[Any] = num_character_labels
_UpperCAmelCase : int = num_bpe_labels
_UpperCAmelCase : List[str] = num_wordpiece_labels
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = mlp_ratio
_UpperCAmelCase : List[str] = distilled
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : str = drop_rate
_UpperCAmelCase : List[Any] = qkv_bias
_UpperCAmelCase : List[str] = attn_drop_rate
_UpperCAmelCase : Dict = drop_path_rate
_UpperCAmelCase : Union[str, Any] = output_aa_attentions
_UpperCAmelCase : List[str] = initializer_range
| 263 | 0 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _A (__a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def _A (__a , __a , __a , __a , __a=True ) -> Any:
"""simple docstring"""
model.train()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = F.mse_loss(UpperCamelCase__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(UpperCamelCase__ )
def _A (__a , __a=False ) -> Any:
"""simple docstring"""
set_seed(42 )
SCREAMING_SNAKE_CASE_ : Any = RegressionModel()
SCREAMING_SNAKE_CASE_ : List[str] = deepcopy(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = DataLoader(UpperCamelCase__ , batch_size=16 )
model.to(accelerator.device )
if sched:
SCREAMING_SNAKE_CASE_ : Optional[Any] = AdamW(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ : Optional[int] = AdamW(params=ddp_model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LambdaLR(UpperCamelCase__ , lr_lambda=lambda __a : epoch**0.65 )
SCREAMING_SNAKE_CASE_ : Any = LambdaLR(UpperCamelCase__ , lr_lambda=lambda __a : epoch**0.65 )
# Make a copy of `model`
if sched:
SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE_ : Dict = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _A (__a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = get_training_setup(UpperCamelCase__ )
# Use a single batch
SCREAMING_SNAKE_CASE_ : str = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ : int = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ : List[str] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def _A (__a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_training_setup(UpperCamelCase__ )
# Use a single batch
SCREAMING_SNAKE_CASE_ : Optional[int] = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ : Any = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE_ : Tuple = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def _A (__a=False , __a=False ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE_ : Dict = get_training_setup(UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Dict = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
SCREAMING_SNAKE_CASE_ : str = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
GradientState._reset_state()
def _A (__a=False , __a=False ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE_ : Tuple = get_training_setup(UpperCamelCase__ , UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : str = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_ : List[str] = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_ : List[str] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
SCREAMING_SNAKE_CASE_ : List[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ ))
if accelerator.num_processes > 1:
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = Accelerator()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE_ : Any = DataLoader(UpperCamelCase__ , batch_size=16 )
SCREAMING_SNAKE_CASE_ : List[Any] = RegressionDataset(length=96 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(UpperCamelCase__ , batch_size=16 )
SCREAMING_SNAKE_CASE_ : List[str] = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if iteration < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if batch_num < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _A () -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator()
SCREAMING_SNAKE_CASE_ : str = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(UpperCamelCase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(UpperCamelCase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ )
def _A (__a ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 91 |
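The pattern these tests exercise boils down to wrapping each training step in `accelerator.accumulate(model)`; a minimal runnable sketch with a toy linear model (names and sizes are illustrative):

import torch
from accelerate import Accelerator
from torch.utils.data import DataLoader, TensorDataset

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = DataLoader(TensorDataset(torch.randn(16, 4), torch.randn(16, 1)), batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Inside accumulate(), gradients are synchronized (and the prepared
    # optimizer actually steps) only on accumulation boundaries.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()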
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 | 0 |
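A worked call of the minimax above: with eight leaf scores the tree height is log2(8) = 3, and the maximizer moving first secures 65.

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
# depth 2 (max): 90, 33, 65, 34423 -> depth 1 (min): 33, 65 -> depth 0 (max): 65
assert minimax(0, 0, True, scores, 3) == 65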
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (trimmed-down fairseq dictionary)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a '<symbol> <count>' text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(' ', 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(' ', 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'')
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f'{k}</w>']
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f'path {biogpt_checkpoint_path} does not exist!')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f'path to the file {checkpoint_file} does not exist!')
    chkpt = torch.load(checkpoint_file, map_location='cpu')

    args = chkpt['cfg']['model']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt')
    if not os.path.isfile(dict_file):
        raise ValueError(f'path to the file {dict_file} does not exist!')
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file'])
    print(f'Generating {src_vocab_file} of {src_vocab_size} records')
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f'path to the file {bpecodes_file} does not exist!')

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')

    model_conf = {
        'activation_dropout': args['activation_dropout'],
        'architectures': ['BioGptForCausalLM'],
        'attention_probs_dropout_prob': args['attention_dropout'],
        'bos_token_id': 0,
        'eos_token_id': 2,
        'hidden_act': args['activation_fn'],
        'hidden_dropout_prob': args['dropout'],
        'hidden_size': args['decoder_embed_dim'],
        'initializer_range': 0.02,
        'intermediate_size': args['decoder_ffn_embed_dim'],
        'layer_norm_eps': 1e-12,
        'layerdrop': args['decoder_layerdrop'],
        'max_position_embeddings': args['max_target_positions'],
        'model_type': 'biogpt',
        'num_attention_heads': args['decoder_attention_heads'],
        'num_hidden_layers': args['decoder_layers'],
        'pad_token_id': 1,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_decoder_input_output_embed'],
        'vocab_size': src_vocab_size,
    }

    # good hparam defaults to start with
    print(f'Generating {biogpt_model_config_file}')
    with open(biogpt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        'bos_token': '<s>',
        'eos_token': '</s>',
        'model_max_length': 1024,
        'pad_token': '<pad>',
        'special_tokens_map_file': None,
        'tokenizer_class': 'BioGptTokenizer',
        'unk_token': '<unk>',
    }

    print(f'Generating {biogpt_tokenizer_config_file}')
    with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt['model']

    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight'):
            model_state_dict[layer_name.replace('decoder.', '')] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace('decoder', 'biogpt')] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('Conversion is done!')
if __name__ == "__main__":
lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase :int = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path) | 331 |
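The comment inside `rewrite_dict_keys` can be checked directly; a small sketch (the special tokens must be present in the input so they can be restored):

d = {"le@@": 5, "tt@@": 6, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
d2 = rewrite_dict_keys(d)
assert d2["le"] == 5 and d2["tt"] == 6 and d2["er</w>"] == 7  # "@@" stripped, "</w>" appended
assert d2["<s>"] == 0  # special tokens keep their bare form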
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
_UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = generator.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = '''cyberpunk 2077'''
_UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger '''
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.text_to_image(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
_UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 263 | 0 |
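The reproducibility assertions in these tests rely on seeded generators: re-seeding makes the sampling deterministic. A minimal sketch:

import torch

first = torch.randn(2, 2, generator=torch.manual_seed(0))
second = torch.randn(2, 2, generator=torch.manual_seed(0))
assert torch.equal(first, second)  # identical draws from identically seeded generators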
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
__lowercase : List[Any] = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
__lowercase : Any = DatasetInfosDict.from_directory(UpperCamelCase__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
__lowercase : List[Any] = str(UpperCamelCase__ )
dataset_info.write_to_directory(UpperCamelCase__ )
__lowercase : Optional[int] = DatasetInfo.from_directory(UpperCamelCase__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCamelCase__ , '''dataset_info.json''' ) )
def UpperCAmelCase_ ( ) -> Dict:
__lowercase : Optional[int] = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1_337 , post_processing_size=442 , dataset_size=1_234 , size_in_bytes=1_337 + 442 + 1_234 , )
__lowercase : Union[str, Any] = dataset_info._to_yaml_dict()
assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
__lowercase : Any = yaml.safe_dump(UpperCamelCase__ )
__lowercase : Optional[int] = yaml.safe_load(UpperCamelCase__ )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase_ ( ) -> List[Any]:
__lowercase : Any = DatasetInfo()
__lowercase : Optional[int] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
__lowercase : Union[str, Any] = str(UpperCamelCase__ )
dataset_infos_dict.write_to_directory(UpperCamelCase__ )
__lowercase : Optional[Any] = DatasetInfosDict.from_directory(UpperCamelCase__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
__lowercase : Any = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
__lowercase : str = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCamelCase__ , '''README.md''' ) )
| 156 |
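A minimal round-trip in the spirit of the tests above: write a `DatasetInfo` to a directory and read it back.

import tempfile

from datasets import DatasetInfo

info = DatasetInfo(description="foo", citation="bar")
with tempfile.TemporaryDirectory() as tmp_dir:
    info.write_to_directory(tmp_dir)
    reloaded = DatasetInfo.from_directory(tmp_dir)
assert reloaded.description == "foo"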
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCAmelCase :Any = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite runs a backward pass at inference time, and some of the ops
    # involved have no deterministic implementation, so determinism is disabled for
    # this test class and restored afterwards.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
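# Added usage sketch (hypothetical output path; relies only on the public pipeline
# API exercised by the integration test above). `token_indices` are positions in the
# tokenized prompt whose cross-attention the pipeline strengthens during denoising.
def _attend_and_excite_demo(pipe: StableDiffusionAttendAndExcitePipeline) -> None:
    prompt = "a cat and a frog"
    # Inspect the tokenization first: with BOS/EOS added, "cat" is index 2 and "frog" is index 5.
    print(pipe.tokenizer(prompt).input_ids)
    image = pipe(prompt=prompt, token_indices=[2, 5], num_inference_steps=50).images[0]
    image.save("cat_and_frog.png")  # hypothetical output file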
| 263 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        decoded = try_key(ciphertext, key)
        if decoded is not None:
            possibles.append(decoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
    print(f"{solution() = }")
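# Added round-trip sketch (hypothetical key and message, not part of the original
# solution): XOR is self-inverse, so applying the same key twice recovers the
# plaintext that try_key searches for.
def _xor_round_trip_demo() -> None:
    message = "the quick brown fox"
    key = (ord("a"), ord("b"), ord("c"))
    ciphertext = [ord(char) ^ keychar for char, keychar in zip(message, cycle(key))]
    assert try_key(ciphertext, key) == message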
| 69 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
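# Added usage sketch (mirrors the threaded pattern the tests above exercise,
# reusing the tiny test model): generation runs on a background thread while the
# caller consumes decoded text chunks from the streamer as they arrive.
def _streaming_generation_demo() -> None:
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    for chunk in streamer:
        print(chunk, end="", flush=True)  # render text incrementally
    thread.join()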
| 263 | 0 |
"""simple docstring"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # pytest exits with code 5 when no tests are collected; force that to 0 so CI doesn't fail
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
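# Added illustration (hypothetical function, not part of the original conftest):
# once IGNORE_RESULT is registered above, a doctest line can opt out of output
# comparison using the standard doctest directive syntax.
def _ignore_result_demo():
    """
    >>> import random
    >>> random.random()  # doctest: +IGNORE_RESULT
    0.123
    """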
| 194 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the gamma function by numerically integrating x^(num - 1) * e^(-x) from 0 to infinity."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
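# Added sanity check (not part of the original module): for positive integers n,
# Γ(n) = (n - 1)!, so the numeric integral should return approximately 24 for n = 5.
def _gamma_integer_check() -> None:
    assert math.isclose(gamma(5), 24.0, rel_tol=1e-6)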
| 263 | 0 |