code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
A__ = logging.get_logger(__name__)
# NOTE(review): this block appears machine-mangled — most assignment targets were
# replaced by the placeholder `_lowerCAmelCase`, so the intermediate results that
# later lines read (image, model_inputs, model_outputs, prediction, output,
# depth, output_dict, self.image_size) are never actually bound, and the
# decorator/base-class name `__lowerCamelCase` is undefined here. Comments below
# describe the apparent intent (a transformers depth-estimation pipeline);
# confirm against the original source.
@add_end_docstrings(__lowerCamelCase )
class __lowerCAmelCase ( __lowerCamelCase ):
    def __init__( self , *_snake_case , **_snake_case ):
        """Pipeline constructor: requires the vision backend and checks the model type."""
        super().__init__(*_snake_case , **_snake_case )
        requires_backends(self , """vision""" )
        self.check_model_type(_snake_case )

    def __call__( self , _snake_case , **_snake_case ):
        """Run depth estimation on an image (or batch); delegates to Pipeline.__call__."""
        return super().__call__(_snake_case , **_snake_case )

    def snake_case ( self , **_snake_case ):
        """Split kwargs into (preprocess, forward, postprocess) parameter dicts — none used."""
        return {}, {}, {}

    def snake_case ( self , _snake_case ):
        """Preprocess: load the image and convert it to framework tensors."""
        _lowerCAmelCase = load_image(_snake_case )
        # presumably stored as self.image_size for use in postprocessing — confirm
        _lowerCAmelCase = image.size
        _lowerCAmelCase = self.image_processor(images=_snake_case , return_tensors=self.framework )
        return model_inputs

    def snake_case ( self , _snake_case ):
        """Forward: run the depth-estimation model on the preprocessed inputs."""
        _lowerCAmelCase = self.model(**_snake_case )
        return model_outputs

    def snake_case ( self , _snake_case ):
        """Postprocess: upsample predicted depth to the input size and build a PIL image."""
        _lowerCAmelCase = model_outputs.predicted_depth
        # bicubic upsample back to (height, width); size is the reversed (width, height)
        _lowerCAmelCase = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=_snake_case )
        _lowerCAmelCase = prediction.squeeze().cpu().numpy()
        # rescale to 0-255 uint8 for visualization
        _lowerCAmelCase = (output * 255 / np.max(_snake_case )).astype("""uint8""" )
        _lowerCAmelCase = Image.fromarray(_snake_case )
        _lowerCAmelCase = {}
        _lowerCAmelCase = predicted_depth
        _lowerCAmelCase = depth
        return output_dict
| 82 |
class lowercase__ :
    """A radix-tree (compressed trie) node.

    NOTE(review): this block is machine-mangled — every assignment target was
    replaced by the placeholder `UpperCamelCase__` (so `self.nodes`,
    `self.prefix`, `self.is_leaf` and the locals read below are never bound),
    `__init__` declares the duplicate parameter name `__magic_name__` twice
    (a SyntaxError as written), and `RadixNode` is not defined under that name
    in this file. Comments describe the apparent intent; confirm upstream.
    """

    def __init__( self, __magic_name__ = "", __magic_name__ = False ) -> None:
        """Create a node holding `prefix`, marked as a leaf when the tree contains its word."""
        # Mapping from the first character of the prefix of the node
        UpperCamelCase__ : dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        UpperCamelCase__ : Optional[Any] = is_leaf
        UpperCamelCase__ : List[str] = prefix

    def UpperCamelCase__ ( self, __magic_name__ ) -> tuple[str, str, str]:
        """Return (common prefix, remaining node prefix, remaining word)."""
        # x counts the matching leading characters (its binding was mangled away)
        UpperCamelCase__ : Dict = 0
        for q, w in zip(self.prefix, __magic_name__ ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def UpperCamelCase__ ( self, __magic_name__ ) -> None:
        """Insert every word of an iterable into the tree."""
        for word in words:
            self.insert(__magic_name__ )

    def UpperCamelCase__ ( self, __magic_name__ ) -> None:
        """Insert a single word below this node."""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            UpperCamelCase__ : Union[str, Any] = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            UpperCamelCase__ : Tuple = RadixNode(prefix=__magic_name__, is_leaf=__magic_name__ )
        else:
            UpperCamelCase__ : Any = self.nodes[word[0]]
            UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = incoming_node.match(
                __magic_name__ )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(__magic_name__ )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                UpperCamelCase__ : Tuple = remaining_prefix
                UpperCamelCase__ : Tuple = self.nodes[matching_string[0]]
                UpperCamelCase__ : List[Any] = RadixNode(__magic_name__, __magic_name__ )
                UpperCamelCase__ : str = aux_node
                if remaining_word == "":
                    UpperCamelCase__ : Any = True
                else:
                    self.nodes[matching_string[0]].insert(__magic_name__ )

    def UpperCamelCase__ ( self, __magic_name__ ) -> bool:
        """Return True when the tree contains the given word."""
        UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0], __magic_name__ )
        if not incoming_node:
            return False
        else:
            UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Dict = incoming_node.match(
                __magic_name__ )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(__magic_name__ )

    def UpperCamelCase__ ( self, __magic_name__ ) -> bool:
        """Delete a word from the tree, merging single-child nodes afterwards."""
        UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0], __magic_name__ )
        if not incoming_node:
            return False
        else:
            UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = incoming_node.match(
                __magic_name__ )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(__magic_name__ )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            UpperCamelCase__ : Optional[Any] = list(self.nodes.values() )[0]
                            UpperCamelCase__ : Union[str, Any] = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            UpperCamelCase__ : int = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        UpperCamelCase__ : Any = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        UpperCamelCase__ : Union[str, Any] = list(incoming_node.nodes.values() )[0]
                        UpperCamelCase__ : List[str] = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        UpperCamelCase__ : int = merging_node.nodes
                    return True

    def UpperCamelCase__ ( self, __magic_name__ = 0 ) -> None:
        """Pretty-print the subtree, one dash per depth level."""
        if self.prefix != "":
            print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def lowerCAmelCase_ ( ) -> bool:
    """Self-test for the radix tree: insert a word list, then exercise find/delete.

    Returns:
        True when every assertion passes (raises AssertionError otherwise).
    """
    # Fix: the mangled original assigned the fixtures to placeholder names and
    # then referenced the undefined identifiers `__UpperCAmelCase` and `words`.
    words = '''banana bananas bandana band apple all beast'''.split()
    # NOTE(review): RadixNode is expected to be the radix-tree class defined
    # earlier in this file (its name was mangled there to `lowercase__`) — confirm.
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True
def lowerCAmelCase_ ( ) -> None:
    """Assert the radix-tree self-test passes.

    NOTE(review): `test_trie` is not defined under that name in this file — the
    mangler collapsed every function in this snippet to `lowerCAmelCase_`.
    """
    assert test_trie()

def lowerCAmelCase_ ( ) -> None:
    """Demo entry point: build a tree from a word list and pretty-print it.

    NOTE(review): mangled — `root`/`words` were the lost assignment targets, and
    `__UpperCAmelCase` / `RadixNode` are undefined here; confirm upstream.
    """
    UpperCamelCase__ : int = RadixNode()
    UpperCamelCase__ : Any = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(__UpperCAmelCase )
    print('''Words:''' , __UpperCAmelCase )
    print('''Tree:''' )
    root.print_tree()

if __name__ == "__main__":
    # NOTE(review): `main` is undefined in this snippet (function names were
    # mangled); presumably the demo function above — confirm.
    main()
| 201 | 0 |
'''simple docstring'''
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401
# Emit a deprecation warning at import time: this module was moved to
# diffusers.pipelines.controlnet and the re-export is scheduled for removal
# in version 0.22.0. stacklevel=3 points the warning at the user's import.
deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 359 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# Module-level logging state. NOTE(review): names are machine-mangled — every
# global was collapsed to `__lowerCAmelCase`, so each assignment overwrites the
# previous one; in the original these were presumably _lock, _default_handler,
# log_levels, _default_log_level and _tqdm_active — confirm upstream.
__lowerCAmelCase = threading.Lock()
__lowerCAmelCase = None
# mapping from verbosity names to stdlib logging levels
__lowerCAmelCase = {
    """debug""": logging.DEBUG,
    """info""": logging.INFO,
    """warning""": logging.WARNING,
    """error""": logging.ERROR,
    """critical""": logging.CRITICAL,
}
__lowerCAmelCase = logging.WARNING
__lowerCAmelCase = True
def UpperCAmelCase_ ():
    """Return the default logging level for the library.

    Honors the TRANSFORMERS_VERBOSITY environment variable when it names a key
    of the module-level `log_levels` mapping; an unknown value logs a warning
    and is ignored, falling back to the module-level `_default_log_level`.
    """
    # Fix: the mangled code passed the undefined name `__a` as the getenv
    # default and dropped the `env_level_str` binding (it assigned to the
    # placeholder `_a` and then read `env_level_str`).
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level
def UpperCAmelCase_ ():
    """Return the library's top-level package name.

    Takes everything before the first dot of this module's ``__name__``
    (equivalently, the whole name when it contains no dot).
    """
    return __name__.partition('.' )[0]
def UpperCAmelCase_ ():
    """Return the library's root logger.

    NOTE(review): `_get_library_name` is undefined here — function names in
    this snippet were mangled to `UpperCAmelCase_`; confirm upstream.
    """
    return logging.getLogger(_get_library_name() )

def UpperCAmelCase_ ():
    """Idempotently install the library's default StreamHandler on its root logger.

    NOTE(review): mangled — the handler and logger are assigned to the
    placeholder `_a` instead of `_default_handler` / `library_root_logger`, and
    the trailing `False` was presumably `library_root_logger.propagate = False`.
    """
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _a : str = logging.StreamHandler()  # Set sys.stderr as stream.
        # presumably: _default_handler.flush = sys.stderr.flush — confirm
        _a : Optional[Any] = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        _a : List[Any] = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        _a : List[str] = False
def UpperCAmelCase_ ():
    """Remove the library's default handler and reset the root logger level.

    NOTE(review): mangled — `_a` receives values presumably meant for
    `library_root_logger` / `_default_handler`, so later lines read undefined names.
    """
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        _a : int = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _a : str = None

def UpperCAmelCase_ ():
    """Return the mapping of verbosity names to stdlib logging levels."""
    return log_levels

def UpperCAmelCase_ (__a : Optional[str] = None ):
    """Return a logger with the given name (default: the library name).

    NOTE(review): mangled — the parameter is `__a` but the test reads the
    undefined name `name`.
    """
    if name is None:
        _a : List[Any] = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(__a )

def UpperCAmelCase_ ():
    """Return the effective verbosity level of the library root logger."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()

def UpperCAmelCase_ (__a : int ):
    """Set the library root logger's verbosity to the given level."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(__a )

def UpperCAmelCase_ ():
    """Verbosity shortcut. NOTE(review): `__a` is undefined — presumably logging.INFO."""
    return set_verbosity(__a )

def UpperCAmelCase_ ():
    """Verbosity shortcut. NOTE(review): `__a` is undefined — presumably logging.WARNING."""
    return set_verbosity(__a )

def UpperCAmelCase_ ():
    """Verbosity shortcut. NOTE(review): `__a` is undefined — presumably logging.DEBUG."""
    return set_verbosity(__a )

def UpperCAmelCase_ ():
    """Verbosity shortcut. NOTE(review): `__a` is undefined — presumably logging.ERROR."""
    return set_verbosity(__a )
def UpperCAmelCase_ ():
    """Detach the library's default handler from its root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )

def UpperCAmelCase_ ():
    """Re-attach the library's default handler to its root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )

def UpperCAmelCase_ (__a : logging.Handler ):
    """Add a custom handler to the library root logger.

    NOTE(review): mangled — the assert reads `handler` but the parameter is `__a`.
    """
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(__a )

def UpperCAmelCase_ (__a : logging.Handler ):
    """Remove a previously-added handler from the library root logger.

    NOTE(review): mangled — the assert reads `handler` but the parameter is `__a`.
    """
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(__a )

def UpperCAmelCase_ ():
    """Stop library records propagating to the global root logger.

    NOTE(review): mangled — `False` was presumably assigned to
    `_get_library_root_logger().propagate`.
    """
    _configure_library_root_logger()
    _a : Union[str, Any] = False

def UpperCAmelCase_ ():
    """Let library records propagate to the global root logger.

    NOTE(review): mangled — `True` was presumably assigned to
    `_get_library_root_logger().propagate`.
    """
    _configure_library_root_logger()
    _a : Dict = True

def UpperCAmelCase_ ():
    """Install an explicit [LEVEL|file:line] timestamped formatter on every handler.

    NOTE(review): mangled — `handlers` and the formatter were assigned to the
    placeholder `_a`, and `setFormatter(__a)` references an undefined name.
    """
    _a : Any = _get_library_root_logger().handlers
    for handler in handlers:
        _a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
        handler.setFormatter(__a )

def UpperCAmelCase_ ():
    """Reset every handler to the default formatter.

    NOTE(review): mangled — `setFormatter(__a)` was presumably `setFormatter(None)`.
    """
    _a : Union[str, Any] = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(__a )

def UpperCAmelCase_ (self : Union[str, Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ):
    """Logger method: emit a warning unless TRANSFORMERS_NO_ADVISORY_WARNINGS is set.

    NOTE(review): mangled — `*__a, **__a` duplicates the parameter name (a
    SyntaxError as written), the getenv default `__a` is undefined, and the
    `no_advisory_warnings` binding was replaced by the placeholder `_a`.
    """
    _a : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , __a )
    if no_advisory_warnings:
        return
    self.warning(*__a , **__a )

# presumably: logging.Logger.warning_advice = warning_advice (names mangled;
# `warning_advice` is undefined here) — confirm upstream
__lowerCAmelCase = warning_advice

# NOTE(review): `lru_cache(__a)` — `__a` is undefined; presumably lru_cache(None)
@functools.lru_cache(__a )
def UpperCAmelCase_ (self : int , *__a : Optional[Any] , **__a : Any ):
    """Logger method: emit each distinct warning only once (deduplicated by the cache).

    NOTE(review): mangled — `*__a, **__a` duplicates the parameter name.
    """
    self.warning(*__a , **__a )

# presumably: logging.Logger.warning_once = warning_once — confirm upstream
__lowerCAmelCase = warning_once
class UpperCAmelCase__ :
    """A no-op stand-in for ``tqdm.tqdm``, used when progress bars are disabled.

    Iterating yields the wrapped iterable unchanged; every other tqdm
    attribute/method resolves to a no-op function; it is also usable as a
    context manager.
    """

    def __init__( self, *args, **kwargs ):  # pylint: disable=unused-argument
        """Keep the first positional argument as the wrapped iterable.

        Fix: the mangled original declared duplicate parameter names
        (``*_a, **_a`` — a SyntaxError as written), read the undefined name
        ``args`` and never assigned ``self._iterator``.
        """
        self._iterator = args[0] if args else None

    def __iter__( self ):
        """Iterate over the wrapped iterable."""
        return iter(self._iterator )

    def __getattr__( self, name ):
        """Return a function that accepts anything and does nothing, so any
        tqdm method call (update, set_description, close, ...) is a no-op."""
        def empty_fn(*args, **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn

    def __enter__( self ):
        """Support `with` blocks like real tqdm."""
        return self

    def __exit__( self, exc_type, exc_value, traceback ):
        """Nothing to clean up; do not suppress exceptions."""
        return
class UpperCAmelCase__ :
    """Factory dispatching to real tqdm or the no-op EmptyTqdm depending on the
    module-level `_tqdm_active` flag.

    NOTE(review): mangled — both `__lowercase` methods share a name, `*_a ,**_a`
    duplicates a parameter name (a SyntaxError as written), and `EmptyTqdm` /
    `_tqdm_active` are not defined under those names in this snippet.
    """

    def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ):
        """Return a tqdm progress bar, or an inert EmptyTqdm when disabled."""
        if _tqdm_active:
            return tqdm_lib.tqdm(*_a ,**_a )
        else:
            return EmptyTqdm(*_a ,**_a )

    def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ):
        """Forward set_lock to tqdm when progress bars are active.

        NOTE(review): mangled — `_a : Any = None` was presumably `self._lock = None`.
        """
        _a : Any = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*_a ,**_a )

    def __lowercase ( self : List[str] ):
        """Forward get_lock to tqdm when progress bars are active."""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()

# module-level factory instance (mangled: the original name was presumably `tqdm`)
__lowerCAmelCase = _tqdm_cls()

def UpperCAmelCase_ ():
    """Return whether tqdm progress bars are globally enabled."""
    global _tqdm_active
    return bool(_tqdm_active )

def UpperCAmelCase_ ():
    """Globally enable tqdm progress bars here and in huggingface_hub.

    NOTE(review): mangled — `_a : str = True` was presumably `_tqdm_active = True`.
    """
    global _tqdm_active
    _a : str = True
    hf_hub_utils.enable_progress_bars()

def UpperCAmelCase_ ():
    """Globally disable tqdm progress bars here and in huggingface_hub.

    NOTE(review): mangled — `_a : Dict = False` was presumably `_tqdm_active = False`.
    """
    global _tqdm_active
    _a : Dict = False
    hf_hub_utils.disable_progress_bars()
| 5 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
snake_case_ : int = logging.get_logger(__name__)
def A__ ( UpperCAmelCase_ ):
    """Return the shape of a tensor as a list, preferring static dimensions.

    NumPy arrays yield their static shape directly; for TF tensors, dimensions
    unknown at graph-construction time are replaced by dynamic scalar tensors.

    Fix: the mangled original read the unbound names `tensor`, `dynamic` and
    an enumerate source whose binding (`static`) was dropped, and passed the
    tensor itself (instead of None) to tf.TensorShape.
    """
    tensor = UpperCAmelCase_
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    # dynamic shape as a 1-D TF tensor
    dynamic = tf.shape(tensor )
    # fully-unknown static shape: fall back to the dynamic shape entirely
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
# NOTE(review): the four helpers below are machine-mangled TF utilities — each
# `def A__` repeats the parameter name `UpperCAmelCase_` (a SyntaxError as
# written) and assignment targets were replaced by the placeholder
# `_UpperCamelCase`, so the names later lines read are unbound. Comments
# describe the apparent intent; confirm against the original source.
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None ):
    """Numerically-safe softmax: adds a tiny constant to the logits first
    (mangled: `logits` is unbound — the parameters were presumably
    logits, axis, name)."""
    return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCAmelCase_ , name=UpperCAmelCase_ )

def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=1E-5 , UpperCAmelCase_=-1 ):
    """Simplified functional layer norm over a single axis."""
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
        raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
    # Get mean and variance on the axis to be normalized
    _UpperCamelCase , _UpperCamelCase : Any = tf.nn.moments(UpperCAmelCase_ , axes=[axis] , keepdims=UpperCAmelCase_ )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        _UpperCamelCase : str = [1] * inputs.shape.rank
        _UpperCamelCase : List[str] = shape_list(UpperCAmelCase_ )[axis]
        _UpperCamelCase : Optional[int] = tf.reshape(UpperCAmelCase_ , UpperCAmelCase_ )
        _UpperCamelCase : str = tf.reshape(UpperCAmelCase_ , UpperCAmelCase_ )
    # Compute layer normalization using the batch_normalization
    # function.
    _UpperCamelCase : str = tf.nn.batch_normalization(
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , offset=UpperCAmelCase_ , scale=UpperCAmelCase_ , variance_epsilon=UpperCAmelCase_ , )
    return outputs

def A__ ( UpperCAmelCase_ , UpperCAmelCase_=0 , UpperCAmelCase_=-1 ):
    """Flatten dimensions start_dim..end_dim of a TF tensor (mangled: `input`,
    `start_dim`, `end_dim`, `in_shape`, `flattened_dim` are unbound)."""
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    _UpperCamelCase : str = tf.shape(UpperCAmelCase_ )
    _UpperCamelCase : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    _UpperCamelCase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(UpperCAmelCase_ , UpperCAmelCase_ )

def A__ ( UpperCAmelCase_ ):
    """Broadcast a 2-D/3-D encoder attention mask to 4-D and invert it into
    additive form (0 for keep, dtype.min for masked positions)."""
    if not isinstance(UpperCAmelCase_ , tf.Tensor ):
        _UpperCamelCase : str = tf.convert_to_tensor(UpperCAmelCase_ )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        _UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        _UpperCamelCase : Union[str, Any] = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    _UpperCamelCase : List[str] = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
# NOTE(review): same mangling as the helpers above in the original snippet —
# repeated parameter names and placeholder assignment targets leave many of the
# names read below unbound. Comments describe apparent intent; confirm upstream.
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = "input_ids" ):
    """Assert every id in the tensor is below the embedding layer's input
    dimension (mangled: `tensor`, `tensor_name`, `embed_dim` are unbound)."""
    tf.debugging.assert_less(
        UpperCAmelCase_ , tf.cast(UpperCAmelCase_ , dtype=tensor.dtype ) , message=(
            f'The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase_ )}) must be smaller than the embedding '
            f'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
        ) , )

def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    """Save a possibly-oversized attribute list to an HDF5 group, chunking it
    to stay under the HDF5 64512-byte object-header limit."""
    _UpperCamelCase : Tuple = 6_4_5_1_2
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    _UpperCamelCase : Dict = [x for x in data if len(UpperCAmelCase_ ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            'The following attributes cannot be saved to HDF5 file because '
            f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
            f'bytes: {bad_attributes}' )
    _UpperCamelCase : int = np.asarray(UpperCAmelCase_ )
    _UpperCamelCase : Optional[Any] = 1
    _UpperCamelCase : Optional[Any] = np.array_split(UpperCAmelCase_ , UpperCAmelCase_ )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        _UpperCamelCase : Optional[int] = np.array_split(UpperCAmelCase_ , UpperCAmelCase_ )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(UpperCAmelCase_ ):
            _UpperCamelCase : List[str] = chunk_data
    else:
        _UpperCamelCase : List[str] = data

def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
    """Load a (possibly chunked) attribute list from an HDF5 group, decoding
    bytes to str (mangled: `group`, `name`, `data`, `chunk_id` bindings lost)."""
    if name in group.attrs:
        _UpperCamelCase : Tuple = [n.decode('utf8' ) if hasattr(UpperCAmelCase_ , 'decode' ) else n for n in group.attrs[name]]
    else:
        _UpperCamelCase : int = []
        _UpperCamelCase : int = 0
        # gather "<name>0", "<name>1", ... chunked attributes back together
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('utf8' ) if hasattr(UpperCAmelCase_ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
            chunk_id += 1
    return data

def A__ ( UpperCAmelCase_ ):
    """Expand every rank-1 TF tensor in a nested structure to rank 2 (adds a
    trailing axis)."""
    def _expand_single_ad_tensor(UpperCAmelCase_ ):
        if isinstance(UpperCAmelCase_ , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(UpperCAmelCase_ , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase_ )
| 83 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
# Global test flag (mangled name; its original purpose is not visible here —
# confirm against the original diffusers test module).
lowercase__ = False

class A_ ( unittest.TestCase ):
    """Unit tests for a VQ-Diffusion pipeline built from tiny dummy components.

    NOTE(review): machine-mangled — every assignment target was replaced by the
    placeholder `UpperCAmelCase`, all methods/properties collapsed to the name
    `UpperCAmelCase_` (so only the last definition would survive at runtime),
    and several arguments were replaced by the undefined name `lowercase_`.
    Comments describe the apparent intent; confirm against the original tests.
    """

    def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def UpperCAmelCase_ ( self : str ) -> Tuple:
        # number of VQ embeddings used by the dummy models (read as self.num_embed)
        return 12

    @property
    def UpperCAmelCase_ ( self : Tuple ) -> Dict:
        # read below as self.num_embeds_ada_norm
        return 12

    @property
    def UpperCAmelCase_ ( self : List[Any] ) -> str:
        # read below as self.text_embedder_hidden_size
        return 32

    @property
    def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
        # tiny VQModel, seeded for reproducibility
        torch.manual_seed(0 )
        UpperCAmelCase : Optional[int] = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model

    @property
    def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
        # tiny CLIP tokenizer from the hub test fixtures
        UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
        # tiny CLIP text encoder, seeded for reproducibility
        torch.manual_seed(0 )
        UpperCAmelCase : Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(lowercase_ )

    @property
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
        # tiny transformer model over 12x12 latents (class name mangled:
        # `TransformeraDModel` was presumably Transformer2DModel)
        torch.manual_seed(0 )
        UpperCAmelCase : Any = 12
        UpperCAmelCase : Optional[int] = 12
        UpperCAmelCase : Tuple = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        UpperCAmelCase : Optional[Any] = TransformeraDModel(**lowercase_ )
        return model

    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
        # end-to-end CPU run with non-learnable classifier-free sampling embeddings;
        # compares a 3x3 output slice against hard-coded reference values
        UpperCAmelCase : Any = 'cpu'
        UpperCAmelCase : Optional[int] = self.dummy_vqvae
        UpperCAmelCase : List[str] = self.dummy_text_encoder
        UpperCAmelCase : str = self.dummy_tokenizer
        UpperCAmelCase : Dict = self.dummy_transformer
        UpperCAmelCase : Union[str, Any] = VQDiffusionScheduler(self.num_embed )
        UpperCAmelCase : str = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
        UpperCAmelCase : Optional[Any] = VQDiffusionPipeline(
            vqvae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , transformer=lowercase_ , scheduler=lowercase_ , learned_classifier_free_sampling_embeddings=lowercase_ , )
        UpperCAmelCase : Optional[int] = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Any = 'teddy bear playing in the pool'
        UpperCAmelCase : List[str] = torch.Generator(device=lowercase_ ).manual_seed(0 )
        UpperCAmelCase : List[str] = pipe([prompt] , generator=lowercase_ , num_inference_steps=2 , output_type='np' )
        UpperCAmelCase : List[str] = output.images
        UpperCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(0 )
        UpperCAmelCase : Union[str, Any] = pipe(
            [prompt] , generator=lowercase_ , output_type='np' , return_dict=lowercase_ , num_inference_steps=2 )[0]
        UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        UpperCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        UpperCAmelCase : Optional[Any] = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase_ ( self : Any ) -> Dict:
        # same end-to-end run but with learnable classifier-free sampling embeddings
        UpperCAmelCase : str = 'cpu'
        UpperCAmelCase : List[str] = self.dummy_vqvae
        UpperCAmelCase : Tuple = self.dummy_text_encoder
        UpperCAmelCase : Dict = self.dummy_tokenizer
        UpperCAmelCase : int = self.dummy_transformer
        UpperCAmelCase : Optional[int] = VQDiffusionScheduler(self.num_embed )
        UpperCAmelCase : List[str] = LearnedClassifierFreeSamplingEmbeddings(
            learnable=lowercase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        UpperCAmelCase : List[Any] = VQDiffusionPipeline(
            vqvae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , transformer=lowercase_ , scheduler=lowercase_ , learned_classifier_free_sampling_embeddings=lowercase_ , )
        UpperCAmelCase : Optional[int] = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase : Union[str, Any] = 'teddy bear playing in the pool'
        UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(0 )
        UpperCAmelCase : Tuple = pipe([prompt] , generator=lowercase_ , num_inference_steps=2 , output_type='np' )
        UpperCAmelCase : List[Any] = output.images
        UpperCAmelCase : Tuple = torch.Generator(device=lowercase_ ).manual_seed(0 )
        UpperCAmelCase : int = pipe(
            [prompt] , generator=lowercase_ , output_type='np' , return_dict=lowercase_ , num_inference_steps=2 )[0]
        UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
        UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        UpperCAmelCase : Optional[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """Slow GPU integration test for the pretrained microsoft/vq-diffusion-ithq
    pipeline, compared against a reference image from the hub.

    NOTE(review): machine-mangled — assignment targets were replaced by the
    placeholder `UpperCAmelCase` and several arguments by the undefined name
    `lowercase_`; names read below (pipeline, output, image, expected_image)
    are therefore unbound as written. Confirm against the original tests.
    """

    def UpperCAmelCase_ ( self : int ) -> str:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
        # load the reference output, run the pretrained pipeline, and compare
        UpperCAmelCase : List[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
        UpperCAmelCase : Tuple = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
        UpperCAmelCase : str = pipeline.to(lowercase_ )
        pipeline.set_progress_bar_config(disable=lowercase_ )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        UpperCAmelCase : Any = torch.Generator(device=lowercase_ ).manual_seed(0 )
        UpperCAmelCase : Any = pipeline(
            'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=lowercase_ , output_type='np' , )
        UpperCAmelCase : Dict = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 151 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
# type alias for an undirected edge between two integer vertices
# (mangled: the original name was presumably EdgeT, used in annotations below)
_SCREAMING_SNAKE_CASE : Any = tuple[int, int]

class UpperCAmelCase__ :
    """A weighted undirected graph with a Prim's-algorithm MST method.

    NOTE(review): machine-mangled — every assignment target was replaced by the
    placeholder `SCREAMING_SNAKE_CASE__`, so attributes (self.vertices,
    self.edges) and locals (subgraph, min_weight, min_edge) are never actually
    bound, and the class `Graph` is not defined under that name here.
    """

    def __init__( self : Dict , __lowerCamelCase : set[int] , __lowerCamelCase : Mapping[EdgeT, int] ) -> None:
        """Store the vertex set and normalize each edge key to (min, max) order."""
        SCREAMING_SNAKE_CASE__ = vertices
        SCREAMING_SNAKE_CASE__ = {
            (min(__lowerCamelCase ), max(__lowerCamelCase )): weight for edge, weight in edges.items()
        }

    def lowercase_ ( self : Tuple , __lowerCamelCase : EdgeT , __lowerCamelCase : int ) -> None:
        """Add an edge with the given weight, registering both endpoints."""
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        SCREAMING_SNAKE_CASE__ = weight

    def lowercase_ ( self : Optional[Any] ) -> Graph:
        """Compute a minimum spanning tree via Prim's algorithm.

        Grows a subgraph from the smallest vertex, repeatedly adding the
        cheapest edge that crosses the frontier (the XOR membership test).
        """
        SCREAMING_SNAKE_CASE__ = Graph({min(self.vertices )} , {} )
        SCREAMING_SNAKE_CASE__ = 42
        SCREAMING_SNAKE_CASE__ = 42
        SCREAMING_SNAKE_CASE__ = 42
        SCREAMING_SNAKE_CASE__ = 42
        while len(subgraph.vertices ) < len(self.vertices ):
            # sentinel larger than any real edge weight
            SCREAMING_SNAKE_CASE__ = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        SCREAMING_SNAKE_CASE__ = edge
                        SCREAMING_SNAKE_CASE__ = weight
            subgraph.add_edge(__lowerCamelCase , __lowerCamelCase )
        return subgraph
def UpperCAmelCase_ ( _A = "p107_network.txt" ):
    """Project Euler 107: weight saved by replacing the network with its MST.

    Parses a comma-separated adjacency matrix ("-" marks no edge) from a file
    next to this module and returns total weight minus MST weight.

    NOTE(review): machine-mangled — assignment targets were replaced by the
    placeholder `SCREAMING_SNAKE_CASE__`, so the locals read below
    (script_dir, adjaceny_matrix, graph, subgraph, initial_total,
    optimal_total) are never actually bound, and `Graph` is undefined here.
    """
    SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.dirname(_A ) )
    SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
    SCREAMING_SNAKE_CASE__ = {}
    SCREAMING_SNAKE_CASE__ = 42
    SCREAMING_SNAKE_CASE__ = 42
    SCREAMING_SNAKE_CASE__ = 42
    with open(_A ) as f:
        SCREAMING_SNAKE_CASE__ = f.read().strip().split('''\n''' )
        SCREAMING_SNAKE_CASE__ = [line.split(''',''' ) for line in data]
    # only the lower triangle is needed for an undirected graph
    for edgea in range(1 , len(_A ) ):
        for edgea in range(_A ):
            if adjaceny_matrix[edgea][edgea] != "-":
                SCREAMING_SNAKE_CASE__ = int(adjaceny_matrix[edgea][edgea] )
    SCREAMING_SNAKE_CASE__ = Graph(set(range(len(_A ) ) ) , _A )
    SCREAMING_SNAKE_CASE__ = graph.prims_algorithm()
    SCREAMING_SNAKE_CASE__ = sum(graph.edges.values() )
    SCREAMING_SNAKE_CASE__ = sum(subgraph.edges.values() )
    return initial_total - optimal_total

if __name__ == "__main__":
    # NOTE(review): `solution` is undefined here (the function above was
    # mangled to `UpperCAmelCase_`) — confirm against the original.
    print(F"{solution() = }")
| 361 |
def UpperCAmelCase_ ( _A ):
    """Return the multiplicative persistence of ``_A``.

    That is the number of times the number must be replaced by the product of
    its digits before a single digit remains (e.g. 86 -> 48 -> 32 -> 6 is 3).

    Raises:
        ValueError: if ``_A`` is not an int or is negative.
    """
    # Fixes over the mangled original: it checked isinstance(_A, _A) (a
    # TypeError at runtime), converted int(_A) instead of each digit, and
    # dropped every assignment target (steps/num_string/numbers/total).
    if not isinstance(_A , int ):
        raise ValueError('''multiplicative_persistence() only accepts integral values''' )
    if _A < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(_A )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for digit in numbers:
            total *= digit
        num_string = str(total )
        steps += 1
    return steps
def UpperCAmelCase_ ( _A ):
    """Return the additive persistence of ``_A``.

    That is the number of times the number must be replaced by the sum of its
    digits before a single digit remains (e.g. 199 -> 19 -> 10 -> 1 is 3).

    Raises:
        ValueError: if ``_A`` is not an int or is negative.
    """
    # Fixes over the mangled original: it checked isinstance(_A, _A) (a
    # TypeError at runtime), converted int(_A) instead of each digit, and
    # dropped every assignment target (steps/num_string/numbers/total).
    if not isinstance(_A , int ):
        raise ValueError('''additive_persistence() only accepts integral values''' )
    if _A < 0:
        raise ValueError('''additive_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(_A )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for digit in numbers:
            total += digit
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
    # run the doctests for the persistence functions above
    import doctest

    doctest.testmod()
| 218 | 0 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class A__ ( __UpperCamelCase):
    """Dataset reader that materializes a Hugging Face dataset from a PySpark
    DataFrame via the packaged `Spark` builder.

    NOTE(review): machine-mangled — the base class `__UpperCamelCase` is an
    undefined placeholder (presumably AbstractDatasetReader, imported above),
    `__init__` repeats the parameter name `_SCREAMING_SNAKE_CASE` (a
    SyntaxError as written), and attribute assignments were collapsed to
    `__lowerCAmelCase`, so `self._load_from_cache_file`, `self._file_format`
    and `self.builder` are never actually set. Confirm upstream.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "arrow" , **_SCREAMING_SNAKE_CASE , ):
        """Forward reader options to the base class and construct the Spark builder."""
        super().__init__(
            split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        __lowerCAmelCase : str = load_from_cache_file
        __lowerCAmelCase : Dict = file_format
        __lowerCAmelCase : Union[str, Any] = Spark(
            df=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , working_dir=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )

    def __lowerCamelCase ( self ):
        """Build and return the dataset (streaming, or fully downloaded/prepared)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        # force a re-download unless loading from the cache was requested
        __lowerCAmelCase : List[Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=_SCREAMING_SNAKE_CASE , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    """Builds a tiny UMT5 config plus matching inputs and runs shared model checks.

    NOTE(review): identifiers restored from use sites — the test class below
    instantiates this as ``UMTaModelTester``; the obfuscated original gave every
    parameter/method the same name, which is a SyntaxError / silent shadowing.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        """Config of a real checkpoint, for slow/integration-style checks."""
        return TaConfig.from_pretrained('google/umt5-base')

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        """Fill in default masks (everything attended / no heads pruned)."""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        """Return ``(config, input_dict)`` with random, pad-free token ids."""
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        # NOTE(review): the obfuscated source read `config.num_attention_heads` into a
        # discarded local; upstream mirrors it onto `encoder_attention_heads` — confirm.
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )

    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """Forward pass shape checks plus past-key-values layout checks."""
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """Check that cached decoding matches full re-decoding."""
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        """Half-precision forward must not produce NaNs."""
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common UMT5 model tests (restored from obfuscated source).

    NOTE(review): base classes restored from the file's mixin imports; boolean flag
    names reconstructed from the upstream T5/UMT5 test suite — confirm the mapping.
    """

    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)

    @unittest.skip('Test has a segmentation fault on torch 1.8.0')
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), F"""{tmpdirname}/t5_test.onnx""", export_params=True, opset_version=9, input_names=['input_ids', 'decoder_input_ids'], )

    @unittest.skipIf(torch_device == 'cpu', 'Cant do half precision')
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_with_head_masking(self):
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            'head_mask': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)

            out = model.generate(
                config_and_inputs[1]['input_ids'], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            # A fully zeroed head mask must zero out the attention weights.
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.')
    def test_disk_offload(self):
        # NOTE(review): method name reconstructed; the body was always `pass`.
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
    """Slow integration test against the real `google/umt5-small` checkpoint."""

    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged')
    def test_small_integration_test(self):
        # NOTE(review): boolean arguments reconstructed (return_dict=True,
        # use_fast=False, legacy=False, padding=True) — the obfuscation erased them.
        model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small', use_fast=False, legacy=False)
        input_text = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        input_ids = tokenizer(input_text, return_tensors='pt', padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
import warnings

from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor


lowercase : Any = logging.get_logger(__name__)


class __snake_case(LayoutLMvaImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Emits a ``FutureWarning`` and defers everything to the image processor.
    NOTE(review): base restored to the imported ``LayoutLMvaImageProcessor`` —
    the obfuscated base name was undefined in this module.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: the original signature reused one name for *args and **kwargs
        # (a SyntaxError) and passed no warning category.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve ``y' = ode_func(x, y)`` with the modified-Euler (Heun) method.

    Explicit-Euler predictor followed by a trapezoidal corrector on each step.

    Args:
        ode_func: right-hand side ``f(x, y)`` of the ODE.
        y0: initial value ``y(x0)``.
        x0: start of the integration interval.
        step_size: step ``h`` (must be positive).
        x_end: end of the integration interval.

    Returns:
        Array of ``n + 1`` approximations ``y[k] ~= y(x0 + k*h)``.
    """
    # Bug fix: the obfuscated original declared duplicate parameter names and
    # never bound `n`, `y`, or `x` — restored working identifiers.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: one explicit-Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both ends.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import os
def __A ( lowerCamelCase_ = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(__snake_case ) , __snake_case ) ) as input_file:
SCREAMING_SNAKE_CASE : Dict = [
[int(__snake_case ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
SCREAMING_SNAKE_CASE : List[Any] = len(__snake_case )
SCREAMING_SNAKE_CASE : int = len(matrix[0] )
SCREAMING_SNAKE_CASE : Optional[int] = [[-1 for _ in range(__snake_case )] for _ in range(__snake_case )]
for i in range(__snake_case ):
SCREAMING_SNAKE_CASE : int = matrix[i][0]
for j in range(1 , __snake_case ):
for i in range(__snake_case ):
SCREAMING_SNAKE_CASE : Optional[int] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __snake_case ):
SCREAMING_SNAKE_CASE : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
SCREAMING_SNAKE_CASE : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 323 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Return the latest ``num_runs`` scheduled (daily) CI workflow runs on ``main``.

    ``token`` may be ``None`` for unauthenticated requests.
    Name restored: the caller below invokes ``get_daily_ci_runs``.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI run, or ``None``."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        # Runs are returned newest-first; take the first finished one.
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into ``output_dir``."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) matches the helper's actual keyword name.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the last daily CI artifacts and return their text contents.

    Returns a dict ``{artifact_name: {member_filename: decoded_text}}`` for every
    artifact zip found in ``output_dir``.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Dict = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =['image_processor', 'tokenizer']
lowerCamelCase__ ='CLIPImageProcessor'
lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self , a_=None , a_=None , **a_ ):
'''simple docstring'''
__snake_case : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a_ , )
__snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a_ , a_ )
def __call__(self , a_=None , a_=None , a_=None , **a_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if images is not None:
__snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
__snake_case : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.decode(*a_ , **a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 24 | 1 |
"""simple docstring"""
from math import factorial
def _snake_case ( lowercase__ = 100 ):
return sum(map(lowercase__ , str(factorial(lowercase__ ) ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip()))) | 96 |
"""simple docstring"""
def _snake_case ( lowercase__ , lowercase__ ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
_lowerCamelCase : List[Any] = (boundary[1] - boundary[0]) / steps
_lowerCamelCase : Tuple = boundary[0]
_lowerCamelCase : Dict = boundary[1]
_lowerCamelCase : List[Any] = make_points(lowercase__ , lowercase__ , lowercase__ )
_lowerCamelCase : List[Any] = 0.0
y += (h / 2.0) * f(lowercase__ )
for i in x_i:
# print(i)
y += h * f(lowercase__ )
y += (h / 2.0) * f(lowercase__ )
return y
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : str = a + h
while x < (b - h):
yield x
_lowerCamelCase : int = x + h
def _snake_case ( lowercase__ ): # enter your function here
_lowerCamelCase : Optional[Any] = (x - 0) * (x - 0)
return y
def _snake_case ( ):
_lowerCamelCase : int = 0.0 # Lower bound of integration
_lowerCamelCase : Optional[int] = 1.0 # Upper bound of integration
_lowerCamelCase : List[str] = 1_0.0 # define number of steps or resolution
_lowerCamelCase : List[Any] = [a, b] # define boundary of integration
_lowerCamelCase : Optional[Any] = method_a(lowercase__ , lowercase__ )
print(f'''y = {y}''' )
if __name__ == "__main__":
main() | 96 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """Minimal KwargsHandler used to exercise `to_kwargs` in the tests below.

    Field names restored: the tests construct ``MockClass(a=2, b=True, c=2.25)``,
    so the fields must be ``a``/``b``/``c`` (the obfuscated version gave all three
    the same name, leaving only one field).
    """

    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    """Tests for accelerate's kwargs handlers (names restored from upstream)."""

    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this very file under torchrun; the `__main__` block below
        # performs the actual DDP kwargs assertions.
        cmd = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_A = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
_A = Accelerator(kwargs_handlers=[ddp_scaler])
_A = torch.nn.Linear(100, 200)
_A = accelerator.prepare(model)
# Check the values changed in kwargs
_A = ''
_A = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 361 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Stand-in so references to `Image.open` don't fail when PIL is absent.

        Name restored to ``Image`` so both branches bind the same identifier;
        the original stub also reused one name for *args and **kwargs (SyntaxError).
        """

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
__UpperCAmelCase : List[Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
snake_case : int = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
snake_case : str = object_detector(examples[0] , threshold=0.0 )
snake_case : str = len(UpperCamelCase__ )
self.assertGreater(UpperCamelCase__ , 0 )
self.assertEqual(
UpperCamelCase__ , [
{
"score": ANY(UpperCamelCase__ ),
"label": ANY(UpperCamelCase__ ),
"box": {"xmin": ANY(UpperCamelCase__ ), "ymin": ANY(UpperCamelCase__ ), "xmax": ANY(UpperCamelCase__ ), "ymax": ANY(UpperCamelCase__ )},
}
for i in range(UpperCamelCase__ )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
snake_case : Dict = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
snake_case : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
snake_case : Dict = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
def test_large_model_pt ( self ):
    """Integration test: default zero-shot object-detection pipeline on a COCO image,
    with both a single input and a batched (list) input.

    Fixes: locals were bound to placeholder names while the body referenced
    ``object_detector``; the method lacked the ``test_`` prefix unittest needs
    and collided with identically named siblings.
    NOTE(review): exact original method name reconstructed — confirm upstream.
    """
    object_detector = pipeline("zero-shot-object-detection" )
    outputs = object_detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
    self.assertEqual(
        nested_simplify(outputs , decimals=4 ) , [
            {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
            {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
            {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
        ] , )
    outputs = object_detector(
        [
            {
                "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                "candidate_labels": ["cat", "remote", "couch"],
            },
            {
                "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                "candidate_labels": ["cat", "remote", "couch"],
            },
        ] , )
    self.assertEqual(
        nested_simplify(outputs , decimals=4 ) , [
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        ] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def test_small_model_tf ( self ):
    """Placeholder: the zero-shot object-detection pipeline has no TF implementation.

    Fix: renamed with a ``test_`` prefix so unittest discovers it and so it no
    longer collides with sibling methods that shared the same name.
    NOTE(review): exact original name not recoverable from this chunk — confirm upstream.
    """
    pass
@require_torch
@slow
def test_threshold ( self ):
    """Integration test: a higher ``threshold`` filters out low-score detections.

    Fixes: locals restored (``threshold``/``object_detector``/``outputs`` were
    never bound under those names) and the method renamed with a ``test_``
    prefix so it runs and no longer shadows siblings.
    """
    threshold = 0.2
    object_detector = pipeline("zero-shot-object-detection" )
    outputs = object_detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=threshold , )
    self.assertEqual(
        nested_simplify(outputs , decimals=4 ) , [
            {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
            {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
        ] , )
@require_torch
@slow
def test_top_k ( self ):
    """Integration test: ``top_k`` limits the number of returned detections.

    Fixes: locals restored (``top_k``/``object_detector``/``outputs``) and the
    method renamed with a ``test_`` prefix (previously shadowed by siblings).
    """
    top_k = 2
    object_detector = pipeline("zero-shot-object-detection" )
    outputs = object_detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=top_k , )
    self.assertEqual(
        nested_simplify(outputs , decimals=4 ) , [
            {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
            {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
        ] , )
| 112 | 0 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/cuDNN ops deterministic so the pixel-level checksums below are stable.
enable_full_determinism()
class __snake_case ( lowerCamelCase__ , unittest.TestCase ):
    """Fast (CPU-sized) checks for ``DDIMPipeline`` via the pipeline tester mixin.

    Fixes applied to the obfuscated original: class attributes were all bound to
    one name (only the last survived); ``get_dummy_inputs`` had two parameters
    with the same name (SyntaxError); every method shared one name so later
    defs shadowed earlier ones; locals were bound to placeholder names while
    bodies referenced ``unet``/``scheduler``/``generator``/``pipe`` etc.
    NOTE(review): attribute and method names restored from the mixin contract
    (``self.pipeline_class``, ``self.get_dummy_components`` are referenced in
    the bodies) — confirm against upstream.
    """

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): name of this boolean flag inferred from the tester-mixin
    # convention — confirm.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny UNet + DDIM scheduler so the pipeline runs in seconds on CPU."""
        torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        scheduler = DDIMScheduler()
        components = {'''unet''': unet, '''scheduler''': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on ``device``."""
        if str(device ).startswith('''mps''' ):
            # MPS generators are not supported; fall back to the global seed.
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_inference(self):
        """Two DDIM steps on CPU reproduce a known 3x3 corner slice."""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.0_00e00, 5.7_17e-01, 4.7_17e-01, 1.0_00e00, 0.0_00e00, 1.0_00e00, 3.0_00e-04, 0.0_00e00, 9.0_00e-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3 )

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Slow GPU integration tests: full DDIM sampling against pretrained checkpoints.

    Fixes: locals restored (bodies referenced ``ddim``/``ddpm``/``image_slice``
    etc. that were never bound) and the two methods given distinct ``test_``
    names (both previously shared one name, so only the second existed).
    NOTE(review): ``torch_device`` substituted where an undefined placeholder
    was passed to ``.to(...)`` — grounded by the import at the top of this file.
    """

    def test_inference_cifar10(self):
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = '''google/ddpm-ema-bedroom-256'''
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 348 | import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( lowerCamelCase__ ):
    """Tests for ``KDPMaDiscreteScheduler`` built on the common scheduler test base.

    Fixes: class attributes were both bound to one name; all methods shared one
    name (later defs shadowed earlier ones, and none had the ``test_`` prefix);
    locals were bound to placeholder names while bodies referenced ``config``,
    ``scheduler``, ``model``, ``sample`` etc.
    Attribute names ``scheduler_classes`` / ``num_inference_steps`` are grounded
    by ``self.scheduler_classes[0]`` and ``self.num_inference_steps`` in the
    bodies; test-method names follow upstream — confirm.
    """

    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config, overridable per-test via ``kwargs``."""
        config = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**kwargs )
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_full_loop_with_v_prediction(self):
        """Full denoising loop under v-prediction reproduces known checksums."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            residual = model(sample , t )
            output = scheduler.step(residual , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2
            assert abs(result_mean.item() - 0.0002 ) < 1e-3

    def test_full_loop_no_noise(self):
        """Full denoising loop with the default (epsilon) config."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            residual = model(sample , t )
            output = scheduler.step(residual , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3

    def test_full_loop_device(self):
        """Same loop, but with timesteps placed on the target device up front."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            residual = model(sample , t )
            output = scheduler.step(residual , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith('''cpu''' ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
| 348 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
a_ : int = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
a_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 360 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
class _snake_case ( A__ ):
    """Image processor that optionally rescales pixel values and symmetric-pads
    each image so its height/width become multiples of ``pad_size`` (Swin2SR-style).

    Fixes: the three methods all shared one name and had duplicate parameter
    names (``a``, a SyntaxError); ``__init__`` bound arguments to throwaway
    locals instead of ``self.*`` attributes (grounded: ``preprocess`` reads
    ``self.do_rescale`` etc.); ``pad`` referenced ``old_height``/``old_width``
    that were never bound.
    """

    # Grounded by the `{'pixel_values': images}` output below.
    model_input_names = ['''pixel_values''']

    def __init__( self , do_rescale = True , rescale_factor = 1 / 255 , do_pad = True , pad_size = 8 , **kwargs , ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale( self , image , scale , data_format = None , **kwargs) -> np.ndarray:
        """Multiply pixel values by ``scale`` (delegates to the module-level helper)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs)

    def pad( self , image , size , data_format = None):
        """Symmetric-pad ``image`` on the bottom/right up to the next multiple of ``size``.

        Note: an image already a multiple of ``size`` gains a full extra
        ``size`` rows/columns (matches upstream behavior).
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=data_format)

    def preprocess( self , images , do_rescale = None , rescale_factor = None , do_pad = None , pad_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """Validate, optionally rescale, optionally pad, and batch the input images."""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors)
| 327 | 0 |
"""simple docstring"""
def a__ ( n = 4_00_00_00 ) -> int:
    """Return the sum of the even Fibonacci numbers not exceeding ``n``.

    Project Euler problem 2; the default bound is four million.

    Fix: the body referenced ``fib``, ``i``, ``total`` and ``n`` that were
    never bound (locals and the parameter had been renamed away) — names
    restored from the body's own references.
    """
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        # Stop once the newest term exceeds the bound; it is excluded below.
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 171 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __UpperCAmelCase ( lowerCamelCase__ ):
    """Property-like descriptor that computes its value once per instance and
    caches it on the instance under ``__cached_<name>``.

    Fixes: ``__get__`` had two parameters with the same name (SyntaxError) and
    the attribute name computed on one line was discarded — the lookup was
    ``getattr(obj, obj, obj)``, a TypeError at runtime.
    """

    def __get__( self , obj , objtype = None ):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''' )
        attr = '''__cached_''' + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def a__ ( UpperCAmelCase : Optional[Any] ) -> Any:
UpperCAmelCase : Any = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
    """Return True if the argument is a tensor of any supported framework
    (torch FX proxy, torch.Tensor, tf.Tensor, JAX ndarray/Tracer) or a numpy array.

    Frameworks are imported lazily and only probed when installed.
    """
    if is_torch_fx_proxy(UpperCAmelCase ):
        return True
    if is_torch_available():
        import torch

        if isinstance(UpperCAmelCase , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(UpperCAmelCase , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ):
            return True
    # numpy is an unconditional dependency of this module.
    return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]:
    """Return True if the argument is a ``np.ndarray``."""
    return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : str ) -> Tuple:
    """Public wrapper: numpy-array check (numpy is a hard dependency here)."""
    return _is_numpy(UpperCAmelCase )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
    """Return True if the argument is a ``torch.Tensor``; torch must be importable."""
    import torch

    return isinstance(UpperCAmelCase , torch.Tensor )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
    """Safe torch-tensor check: returns False when torch is not installed."""
    return False if not is_torch_available() else _is_torch(UpperCAmelCase )
def a__ ( UpperCAmelCase : Tuple ) -> List[str]:
    """Return True if the argument is a ``torch.device``; torch must be importable."""
    import torch

    return isinstance(UpperCAmelCase , torch.device )
def a__ ( UpperCAmelCase : Any ) -> Any:
    """Safe torch-device check: returns False when torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
import torch
if isinstance(UpperCAmelCase , UpperCAmelCase ):
if hasattr(UpperCAmelCase , UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase )
else:
return False
return isinstance(UpperCAmelCase , torch.dtype )
def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
    """Safe torch-dtype check: returns False when torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase )
def a__ ( UpperCAmelCase : Any ) -> str:
    """Return True if the argument is a ``tf.Tensor``; TF must be importable."""
    import tensorflow as tf

    return isinstance(UpperCAmelCase , tf.Tensor )
def a__ ( UpperCAmelCase : int ) -> Union[str, Any]:
    """Safe TF-tensor check: returns False when TensorFlow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[str] ) -> Tuple:
    """Return True for symbolic (graph-mode) TF tensors; TF must be importable.

    Fix: ``hasattr`` was probed on the tensor argument instead of the ``tf``
    module — the TF-2.14 ``is_symbolic_tensor`` predicate lives on ``tf``.
    """
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , '''is_symbolic_tensor''' ):
        return tf.is_symbolic_tensor(UpperCAmelCase )
    return type(UpperCAmelCase ) == tf.Tensor
def a__ ( UpperCAmelCase : int ) -> List[Any]:
    """Safe symbolic-TF-tensor check: returns False when TF is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[Any] ) -> Dict:
    """Return True if the argument is a JAX ndarray; jax must be importable."""
    import jax.numpy as jnp  # noqa: F811

    return isinstance(UpperCAmelCase , jnp.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
    """Safe JAX-tensor check: returns False when flax/jax is not installed."""
    return False if not is_flax_available() else _is_jax(UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Tuple:
    """Recursively convert tensors/arrays/containers into plain Python objects.

    Fixes: the dict branch read an undefined name ``obj`` and recursed on the
    whole argument instead of each value; recursion now goes through this
    function's own name.
    """
    obj = UpperCAmelCase
    if isinstance(obj , (dict, UserDict) ):
        return {k: a__(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [a__(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def a__ ( UpperCAmelCase : Any ) -> List[str]:
    """Recursively convert tensors/containers into numpy arrays.

    Fixes: the dict branch read an undefined ``obj`` and recursed on the whole
    argument instead of each value; recursion now uses this function's name.
    """
    obj = UpperCAmelCase
    if isinstance(obj , (dict, UserDict) ):
        return {k: a__(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
class __UpperCAmelCase ( lowerCamelCase__ ):
    """Ordered-dict / tuple hybrid base class for model outputs.

    Keys come from the dataclass fields; ``None`` values are dropped; items
    cannot be removed or mutated through dict methods; integer indexing falls
    back to :meth:`to_tuple`.

    Fixes: several methods used ``*__A, **__A`` (duplicate argument name, a
    SyntaxError); all public methods shared one name so later defs shadowed
    earlier ones (restored names are grounded by the error-message strings,
    e.g. "You cannot use ``setdefault``", and by ``self.to_tuple()`` in
    ``__getitem__``); locals like ``class_fields``/``first_field`` were
    referenced but never bound.
    NOTE(review): ``__post_init__`` as the dataclass hook name is reconstructed
    — confirm subclasses are dataclasses upstream.
    """

    def __post_init__( self ):
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v

    def __delitem__( self , *args , **kwargs ):
        raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )

    def setdefault( self , *args , **kwargs ):
        raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )

    def pop( self , *args , **kwargs ):
        raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )

    def update( self , *args , **kwargs ):
        raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )

    def __getitem__( self , k ):
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__( self , name , value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )

    def __setitem__( self , key , value ):
        # Will raise a KeyException if needed
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )

    def to_tuple( self ):
        """Return the non-``None`` values as a plain tuple, in key order."""
        return tuple(self[k] for k in self.keys() )
class __UpperCAmelCase ( str , Enum ):
    """Enum that raises a clearer ValueError when an unknown value is looked up.

    Fixes: the class listed the same base twice (TypeError at class creation) —
    restored to ``(str, Enum)`` per the upstream pattern; the lookup fallback
    must be named ``_missing_`` for Enum to call it; ``_valueamember_map_`` was
    a corruption of Enum's ``_value2member_map_``.
    """

    @classmethod
    def _missing_( cls , value ):
        raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class __UpperCAmelCase ( lowerCamelCase__ ):
    """Padding strategies accepted by tokenizer padding logic.

    Fix: all three members were bound to a single name, so at best only the
    last assignment survived (and an Enum body rejects reused keys outright).
    Member names restored from the string values — confirm upstream.
    """

    LONGEST = """longest"""
    MAX_LENGTH = """max_length"""
    DO_NOT_PAD = """do_not_pad"""
class __UpperCAmelCase ( lowerCamelCase__ ):
    """Framework tags for tensor return types.

    Fix: all four members were bound to a single name (only the last survived;
    an Enum body rejects reused keys). Member names restored from the string
    values — confirm upstream.
    """

    PYTORCH = """pt"""
    TENSORFLOW = """tf"""
    NUMPY = """np"""
    JAX = """jax"""
class __UpperCAmelCase :
    """Enter a list of context managers together, like a stacked ``with``.

    Fixes: ``__init__`` bound its argument to throwaway locals instead of the
    ``self.context_managers`` / ``self.stack`` attributes the other methods
    read; ``__exit__`` used ``*__A, **__A`` (duplicate argument name, a
    SyntaxError).
    """

    def __init__( self , context_managers: List[ContextManager] ):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__( self ):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )

    def __exit__( self , *args , **kwargs ):
        self.stack.__exit__(*args , **kwargs )
def a__ ( model_class ) -> bool:
    """Return True if ``model_class``'s forward/call signature has a
    ``return_loss`` parameter defaulting to ``True``.

    Fix: locals ``framework``/``signature`` were referenced but never bound;
    the parameter is named ``model_class`` as the body's own attribute
    accesses (``model_class.call`` / ``model_class.forward``) require.
    """
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def a__ ( model_class ) -> Any:
    """List the label-like parameter names of ``model_class``'s forward/call.

    QuestionAnswering models additionally expose ``start_positions`` and
    ``end_positions``.
    Fix: locals ``model_name``/``framework``/``signature`` were referenced but
    never bound; parameter renamed to ``model_class`` as the body requires.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def a__ ( d: MutableMapping , parent_key: str = "" , delimiter: str = "." ) -> Union[str, Any]:
    """Flatten a nested mapping into a single dict with delimiter-joined keys.

    Fix: both the outer and inner signatures repeated one parameter name
    (a SyntaxError); names ``d``/``parent_key``/``delimiter`` restored from
    the body's own references, and recursion routed through this function.
    """

    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from a__(v , key , delimiter=delimiter ).items()
            else:
                yield key, v

    return dict(_flatten_dict(d , parent_key , delimiter ) )
@contextmanager
def a__ ( working_dir , use_temp_dir: bool = False ):
    """Yield ``working_dir``, or a fresh temporary directory when
    ``use_temp_dir`` is True (the temp dir is cleaned up on exit).

    Fix: the signature repeated one parameter name (a SyntaxError); names
    restored from the body's references to ``use_temp_dir`` and ``working_dir``.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def a__ ( array , axes = None ) -> Optional[Any]:
    """Framework-agnostic transpose for numpy / torch / TF / JAX arrays.

    Fix: the signature repeated one parameter name (a SyntaxError); names
    ``array``/``axes`` restored from the body (``array.T`` / ``axes=``).
    """
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f'''Type not supported for transpose: {type(array )}.''' )
def a__ ( array , newshape ) -> List[str]:
    """Framework-agnostic reshape for numpy / torch / TF / JAX arrays.

    Fix: the signature repeated one parameter name (a SyntaxError).
    """
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f'''Type not supported for reshape: {type(array )}.''' )
def a__ ( array , axis = None ) -> Any:
    """Framework-agnostic squeeze for numpy / torch / TF / JAX arrays.

    Fix: the signature repeated one parameter name (a SyntaxError); names
    restored from the body's ``axis=`` / ``dim=`` keywords.
    """
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f'''Type not supported for squeeze: {type(array )}.''' )
def a__ ( array , axis ) -> str:
    """Framework-agnostic expand_dims for numpy / torch / TF / JAX arrays.

    Fix: the signature repeated one parameter name (a SyntaxError).
    """
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f'''Type not supported for expand_dims: {type(array )}.''' )
def a__ ( array ) -> List[str]:
    """Return the number of elements in a numpy / torch / TF / JAX array.

    Fixes: the body read an undefined ``array`` (parameter renamed to match);
    the error message said "expand_dims" (copy-paste) — corrected.
    """
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f'''Type not supported for tensor_size: {type(array )}.''' )
def a__ ( auto_map , repo_id ) -> Dict:
    """Prefix each auto-map entry with ``repo_id`` (as ``repo--value``) unless the
    value is ``None`` or already carries a ``--`` prefix. Mutates and returns
    ``auto_map``.

    Fix: the signature repeated one parameter name (a SyntaxError) and the
    rewritten values were bound to a discarded local instead of
    ``auto_map[key]``; names restored from the body's references.
    """
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'''{repo_id}--{value}'''
    return auto_map
def a__ ( model_class ) -> Union[str, Any]:
    """Walk ``model_class``'s MRO and return "pt", "tf" or "flax" based on the
    base classes' modules/names; raise TypeError if none match.

    Fixes: locals ``module``/``name`` were referenced but never bound; the
    raise now lives on the loop's ``else`` so every base is inspected before
    failing (the error message references ``model_class``, grounding the
    parameter name).
    """
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 336 | 0 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def lowercase__ ( base_model_name , hf_config , downstream_dict ):
    '''Build a Wav2Vec2 sequence-classification model and copy the S3PRL
    downstream head weights into it.

    Fixes: the original used one duplicated parameter name for all three
    arguments (a SyntaxError) and assigned the checkpoint tensors to throwaway
    locals instead of the model parameters.
    '''
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    # Load the projector and classification head from the downstream checkpoint.
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model


# Name the dispatcher in the checkpoint converter below actually calls.
convert_classification = lowercase__
def lowercase__ ( base_model_name , hf_config , downstream_dict ):
    '''Build a Wav2Vec2 audio-frame-classification (diarization) model and copy
    the S3PRL downstream head weights into it.

    Fixes: duplicated parameter names (SyntaxError) and checkpoint tensors
    dropped into locals instead of being written to the model.
    '''
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model


# Name the dispatcher in the checkpoint converter below actually calls.
convert_diarization = lowercase__
def lowercase__ ( base_model_name , hf_config , downstream_dict ):
    '''Build a Wav2Vec2 x-vector model and copy the S3PRL downstream weights
    (TDNN layers, utterance-level linears, AM-softmax objective) into it.

    Fixes: duplicated parameter names (SyntaxError) and checkpoint tensors
    dropped into throwaway locals instead of the model parameters.
    '''
    model = WavaVecaForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    # One TDNN layer per configured kernel size.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    # AM-softmax objective weight matrix.
    model.objective.weight.data = downstream_dict['objective.W']
    return model


# Name the dispatcher in the checkpoint converter below actually calls.
convert_xvector = lowercase__
@torch.no_grad()
def lowercase__ ( base_model_name , config_path , checkpoint_path , model_dump_path ):
    '''Convert an S3PRL downstream checkpoint to a HuggingFace Wav2Vec2 model
    and save model + feature extractor to ``model_dump_path``.

    Fixes: duplicated parameter names (SyntaxError), destroyed feature-extractor
    flags, and dispatch to conversion helpers whose names did not exist.
    '''
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    downstream_dict = checkpoint['Downstream']
    hf_config = WavaVecaConfig.from_pretrained(config_path )
    # S3PRL feeds raw (un-normalized) waveforms and uses attention masks.
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification' ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('ForAudioFrameClassification' ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('ForXVector' ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" )
    if hf_config.use_weighted_layer_sum:
        # Layer-weighted sum ("Featurizer") weights live outside the Downstream dict.
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )


# Name the `__main__` entry point below actually calls.
convert_saprl_checkpoint = lowercase__
if __name__ == "__main__":
    # CLI entry point for the S3PRL -> HuggingFace conversion.
    # Fixes: the parser and parsed args were bound to `__UpperCAmelCase` while
    # the following lines read `parser` / `args` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
    )
    parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
    parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 145 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase (SchedulerCommonTest ):
    '''Unit tests for `DDPMParallelScheduler`.

    NOTE(review): the obfuscated original inherited from an undefined name,
    gave every method the same identifier (so only the last definition
    survived) and dropped the config dict into a throwaway local while reading
    an undefined `config`. Base class, class attribute and method names are
    restored to the conventions the shared `SchedulerCommonTest` harness and
    the test runner discover (`scheduler_classes`, `get_scheduler_config`,
    `test_*`). Restored literals (e.g. thresholding flags) follow the
    corresponding diffusers test file — confirm against upstream.
    '''

    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config( self , **kwargs ):
        """Return the default scheduler config, overridden by any `kwargs`."""
        config = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.00_01,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs )
        return config

    def test_timesteps( self ):
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ):
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=t )

    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        # Reference variances for timesteps 0 / 487 / 999 (values from SOURCE).
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_09_79 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5

    def test_batch_step_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1
        per_sample_batch = sample_a.shape[0]
        # Batch three shifted samples and step them all at once.
        samples = torch.stack([sample_a, sample_b, sample_c] , dim=0 )
        timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 11_53.18_33 ) < 1E-2
        assert abs(result_mean.item() - 0.50_05 ) < 1E-3

    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2
        assert abs(result_mean.item() - 0.33_72 ) < 1E-3

    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2
        assert abs(result_mean.item() - 0.26_31 ) < 1E-3

    def test_custom_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )

    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        # 51 after 50 violates the descending-order requirement.
        timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(ValueError , msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=timesteps )

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 145 | 1 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowercase( UpperCamelCase_ ) -> str:
    '''Compute pi to ``UpperCamelCase_`` significant digits (Chudnovsky algorithm).

    Raises TypeError for non-int input and ValueError for precision < 1.
    Returns the digits as a string with the last (possibly rounded) digit
    trimmed off.

    Fixes: the validation read an undefined `__A`, the Decimal context
    precision was never set, and the series accumulators were all bound to a
    single throwaway name.
    '''
    precision = UpperCamelCase_
    if not isinstance(precision , int ):
        raise TypeError("""Undefined for non-integers""" )
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""" )
    # Each Chudnovsky term yields ~14 digits.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    multinomial_term = 1
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]


# Name the `__main__` demo below actually calls.
pi = lowercase
if __name__ == "__main__":
    # Demo: print the first `n` significant digits of pi.
    # Fix: the f-string below reads `n`, but the count was bound to
    # `_SCREAMING_SNAKE_CASE` (NameError at runtime).
    n = 5_0
    print(F'''The first {n} digits of pi is: {pi(n)}''')
| 343 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
a__ : int = logging.get_logger(__name__)

# Tokenizer resource tables. Fix: the class below reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# PRETRAINED_INIT_CONFIGURATION, but every binding had been collapsed onto the
# single name `a__` (leaving the class attributes unresolved).
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_char_small': (
            'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_chinese_char_base': (
            'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_discriminator': (
            'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_generator': (
            'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'junnyu/roformer_chinese_small': 1_5_3_6,
    'junnyu/roformer_chinese_base': 1_5_3_6,
    'junnyu/roformer_chinese_char_small': 5_1_2,
    'junnyu/roformer_chinese_char_base': 5_1_2,
    'junnyu/roformer_small_discriminator': 1_2_8,
    'junnyu/roformer_small_generator': 1_2_8,
}

PRETRAINED_INIT_CONFIGURATION = {
    'junnyu/roformer_chinese_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_base': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
    'junnyu/roformer_small_discriminator': {'do_lower_case': True},
    'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class lowercase_ ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) RoFormer tokenizer with a Jieba pre-tokenizer.

    NOTE(review): the obfuscated original inherited from `a__` (a dict at that
    point in the module — TypeError), used a single duplicated parameter name
    `a` in several signatures (SyntaxError), and lost the attribute/method
    names the `PreTrainedTokenizerFast` framework looks up. They are restored
    to the upstream RoFormer conventions — confirm against transformers'
    `tokenization_roformer_fast.py`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        # Keep the backend normalizer in sync with the requested lowercasing /
        # accent-stripping options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def __getstate__( self ):
        # The custom Jieba pre-tokenizer cannot be pickled: swap in a plain
        # Bert pre-tokenizer for serialization.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__( self , d ):
        # Re-install the Jieba pre-tokenizer after unpickling.
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """[CLS] A [SEP] (followed by B [SEP] for sequence pairs)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """Segment ids: 0 for [CLS] A [SEP], 1 for B [SEP] when a pair is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        # The custom pre-tokenizer cannot be serialized; save with Bert's.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
| 80 | 0 |
def UpperCamelCase ( _A ):
    """Convert an uppercase Excel-style column title (e.g. "AB") to its
    1-based column number (e.g. 28).

    Fixes: `answer`/`index`/`power` were all bound to a single throwaway name
    (NameError), and the exponent in ``pow(26, ...)`` used the string argument
    instead of the digit position.
    """
    assert _A.isupper()
    answer = 0
    index = len(_A ) - 1
    power = 0
    # Treat the title as a base-26 number, least-significant letter first.
    while index >= 0:
        value = (ord(_A[index] ) - 64) * pow(26, power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 138 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__magic_name__: Optional[Any] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__magic_name__: List[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__magic_name__: Union[str, Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 
'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 
'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
    """Corpus-level Google BLEU (GLEU) metric wrapping nltk's `gleu_score`.

    NOTE(review): both hooks were obfuscated to one shared method name (the
    second shadowed the first) with duplicated parameter names (SyntaxError).
    They are restored to the `_info` / `_compute` names the `datasets.Metric`
    base class invokes — confirm against the datasets metric API.
    """

    def _info( self ) -> MetricInfo:
        # Declares the metric's expected input features and documentation.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
                    """references""": datasets.Sequence(
                        datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
                } ) , )

    def _compute( self , predictions , references , min_len = 1 , max_len = 4 ) -> Dict[str, float]:
        """Compute corpus GLEU over tokenized predictions/references."""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 138 | 1 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
lowercase_ = True
except (ImportError, AttributeError):
lowercase_ = object
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
pass
lowercase_ = False
lowercase_ = logging.get_logger("""transformers-cli/serving""")
def a__ ( snake_case ):
    """Factory: build a ServeCommand from parsed CLI arguments.

    Fix: the body read `args.*` while the parameter is named `snake_case`
    (NameError on every call).
    """
    args = snake_case  # parsed argparse Namespace
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers )
class __UpperCamelCase ( lowerCAmelCase__ ):
    """Response payload for the GET / model-info endpoint."""

    lowerCAmelCase_ = 42


# NOTE(review): ServeCommand.model_info() below constructs `ServeModelInfoResult`,
# which the obfuscation left unbound; by position this class appears to be it —
# confirm against transformers' commands/serving.py.
ServeModelInfoResult = __UpperCamelCase
class __UpperCamelCase ( lowerCAmelCase__ ):
    """Response payload for the POST /tokenize endpoint (tokens + optional ids)."""

    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42


# NOTE(review): ServeCommand.tokenize() below constructs `ServeTokenizeResult`,
# which the obfuscation left unbound; by position this class appears to be it —
# confirm against transformers' commands/serving.py.
ServeTokenizeResult = __UpperCamelCase
class __UpperCamelCase ( lowerCAmelCase__ ):
    """Response payload for the POST /detokenize endpoint."""

    lowerCAmelCase_ = 42


# NOTE(review): ServeCommand.detokenize() below constructs `ServeDeTokenizeResult`,
# which the obfuscation left unbound; by position this class appears to be it —
# confirm against transformers' commands/serving.py.
ServeDeTokenizeResult = __UpperCamelCase
class __UpperCamelCase ( lowerCAmelCase__ ):
    """Response payload for the POST /forward endpoint."""

    lowerCAmelCase_ = 42


# NOTE(review): ServeCommand.forward() below constructs `ServeForwardResult`,
# which the obfuscation left unbound; by position this class appears to be it —
# confirm against transformers' commands/serving.py.
ServeForwardResult = __UpperCamelCase
class __UpperCamelCase ( lowerCAmelCase__ ):
    """Transformers CLI `serve` command: exposes a Pipeline over a FastAPI REST API.

    Endpoints: GET / (model info), POST /tokenize, POST /detokenize, POST /forward.

    NOTE(review): the obfuscated original gave all public methods the same
    name (while the FastAPI routes reference `self.model_info`,
    `self.tokenize`, `self.detokenize`, `self.forward`), duplicated the `_A`
    parameter name (SyntaxError) and destroyed the `Body(...)` defaults and
    argument types. Names/defaults are restored per transformers'
    commands/serving.py conventions — confirm against upstream. The module's
    dependency flag `_serve_dependencies_installed` was likewise obfuscated to
    `lowercase_` at module scope and still needs a matching fix there.
    """

    @staticmethod
    def register_subcommand( parser : ArgumentParser ):
        """Attach the `serve` sub-parser and its CLI arguments to the root parser."""
        serve_parser = parser.add_parser(
            '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
        serve_parser.add_argument(
            '''--task''' , type=str , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
        serve_parser.add_argument('''--host''' , type=str , default='''localhost''' , help='''Interface the server will listen on.''' )
        serve_parser.add_argument('''--port''' , type=int , default=8888 , help='''Port the serving will listen to.''' )
        serve_parser.add_argument('''--workers''' , type=int , default=1 , help='''Number of http workers''' )
        serve_parser.add_argument('''--model''' , type=str , help='''Model\'s name or path to stored model.''' )
        serve_parser.add_argument('''--config''' , type=str , help='''Model\'s config name or path to stored model.''' )
        serve_parser.add_argument('''--tokenizer''' , type=str , help='''Tokenizer name to use.''' )
        serve_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        # `a__` (defined earlier in this module) builds a ServeCommand from parsed args.
        serve_parser.set_defaults(func=a__ )

    def __init__( self , pipeline : Pipeline , host : str , port : int , workers : int ):
        """Store server settings and build the FastAPI app (requires [serving] extras)."""
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                '''Using serve command requires FastAPI and uvicorn. '''
                '''Please install transformers with [serving]: pip install "transformers[serving]".'''
                '''Or install FastAPI and uvicorn separately.''' )
        else:
            logger.info(F'''Serving model over {host}:{port}''' )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '''/''' , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=['''GET'''] , ),
                    APIRoute(
                        '''/tokenize''' , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=['''POST'''] , ),
                    APIRoute(
                        '''/detokenize''' , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=['''POST'''] , ),
                    APIRoute(
                        '''/forward''' , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=['''POST'''] , ),
                ] , timeout=600 , )

    def run( self ):
        """Start the uvicorn server (blocking)."""
        run(self._app , host=self.host , port=self.port , workers=self.workers )

    def model_info( self ):
        """Return the served model's configuration as a dict."""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )

    def tokenize( self , text_input : str = Body(None , embed=True ) , return_ids : bool = Body(False , embed=True ) ):
        """Tokenize `text_input`; optionally also return the token ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt , tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(e )} )

    def detokenize( self , tokens_ids : List[int] = Body(None , embed=True ) , skip_special_tokens : bool = Body(False , embed=True ) , cleanup_tokenization_spaces : bool = Body(True , embed=True ) , ):
        """Decode token ids back to a string."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model='''''' , text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(e )} )

    async def forward( self , inputs=Body(None , embed=True ) ):
        """Run the pipeline on `inputs` and return its raw output."""
        # Empty input: answer immediately without invoking the pipeline.
        if len(inputs ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            output = self._pipeline(inputs )
            return ServeForwardResult(output=output )
        except Exception as e:
            raise HTTPException(500 , {'''error''': str(e )} )
| 303 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowercase_ = importlib.util.find_spec("""s3fs""") is not None
# Fix: the guard below reads `_has_safs`, but the probe result was bound only
# to the obfuscated name `lowercase_`.
_has_safs = lowercase_

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

# Fix: the registration loop reads `COMPRESSION_FILESYSTEMS`, which was also
# bound only to `lowercase_`.
COMPRESSION_FILESYSTEMS = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
lowercase_ = COMPRESSION_FILESYSTEMS

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def a__ ( snake_case ):
    """Strip the protocol part from a dataset URI (e.g. "s3://bucket/x" -> "bucket/x").

    Plain paths without "://" are returned unchanged.
    Fix: the body read an undefined `dataset_path` while the parameter is
    named `snake_case`.
    """
    dataset_path = snake_case
    if "://" in dataset_path:
        dataset_path = dataset_path.split('''://''' )[1]
    return dataset_path
def a__ ( snake_case ):
    """Return True when `snake_case` is a non-local (remote) fsspec filesystem.

    `None` and filesystems with protocol "file" count as local.
    Fix: the body read an undefined `fs` while the parameter is named
    `snake_case`.
    """
    if snake_case is not None and snake_case.protocol != "file":
        return True
    else:
        return False
def a__ ( fs , src , dst ):
    """Move `src` to `dst` on the given fsspec filesystem.

    Local filesystems use `shutil.move` (fsspec's mv would copy + rm);
    remote filesystems use `fs.mv(..., recursive=True)`.

    Fixes: all three parameters shared one name (SyntaxError) and the
    `recursive` flag had been destroyed by obfuscation.
    """
    # Inline of the module's is-remote check (its name was lost to obfuscation):
    # local means no fs or the "file" protocol.
    is_local = fs is None or fs.protocol == "file"
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def a__ ( ):
    """Reset fsspec's asyncio machinery (e.g. after a fork).

    Newer fsspec exposes ``reset_lock``; for older versions we manually
    clear the cached IO thread and event loop and recreate the module lock.
    """
    if hasattr(fsspec.asyn , '''reset_lock''' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        # NOTE(review): mirrors fsspec's internal single-element containers —
        # confirm against the pinned fsspec version.
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 303 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: submodule name -> public symbols it defines.
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

# Optional components are only registered when their backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 70 |
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    """Build a random array of 10 ints and a random target sum."""
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


# Shared fixture used by the timing harness below.
dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every 3-permutation of `arr`. O(n^3).

    Returns the sorted triplet summing to `target`, or (0, 0, 0) if none.
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer scan over the sorted array. O(n^2).

    Note: sorts `arr` in place. Returns (0, 0, 0) if no triplet matches.
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    """Time both implementations on the shared random dataset."""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f'''The time for naive implementation is {times[0]}.''')
    print(f'''The time for optimized implementation is {times[1]}.''')
| 70 | 1 |
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Primality test in O(sqrt(n)) using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# Odd composite numbers below 100001 — candidates for Goldbach's other conjecture.
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first `n` odd composites that are NOT a prime plus twice a square.

    Raises:
        ValueError: if `n` is not a positive integer.
    """
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                # Found a decomposition p + 2k^2, so this composite is ruled out.
                break
            i += 1
        else:
            # No decomposition found: a counterexample to the conjecture.
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest such odd composite (Project Euler problem 46)."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 50 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

# File names expected inside a tokenizer checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Hub locations of the pretrained vocab / tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length for each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

# Default init kwargs per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class UpperCamelCase_ ( PreTrainedTokenizerFast ):
    """Fast SqueezeBERT tokenizer (backed by HuggingFace *tokenizers*).

    WordPiece vocabulary with [CLS]/[SEP] sentence markers and optional
    lowercasing / accent stripping, mirroring BertTokenizerFast.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the serialized options differ
        # from the requested ones (e.g. loading an uncased checkpoint as cased).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Return segment ids: 0s for the first sequence (+specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the backend model's vocabulary files to `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 178 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def a__ ( ) -> Union[str, Any]:
    """JIT-compile and import the deformable-DETR attention kernels.

    Builds the MultiScaleDeformableAttention extension from the bundled
    C++/CUDA sources and returns the imported module.
    """
    from torch.utils.cpp_extension import load

    # Kernel sources live three levels above this file, under kernels/deformable_detr.
    root = Path(__file__).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
    src_files = [
        root / filename
        for filename in [
            """vision.cpp""",
            os.path.join("""cpu""", """ms_deform_attn_cpu.cpp"""),
            os.path.join("""cuda""", """ms_deform_attn_cuda.cu"""),
        ]
    ]

    load(
        """MultiScaleDeformableAttention""",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["""-DWITH_CUDA=1"""],
        extra_cuda_cflags=[
            """-DCUDA_HAS_FP16=1""",
            """-D__CUDA_NO_HALF_OPERATORS__""",
            """-D__CUDA_NO_HALF_CONVERSIONS__""",
            """-D__CUDA_NO_HALF2_OPERATORS__""",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 353 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __magic_name__ ( Trainer ):
    """Trainer specialized for extractive question answering.

    Evaluation/prediction first run the model over the whole dataset, then
    `post_process_function` converts raw predictions into answer spans
    before `compute_metrics` is applied.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Run evaluation, post-process predictions, and return metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="""Evaluation""",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the user's metric function, even on failure.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Run prediction, post-process, and return a PredictionOutput with metrics."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="""Prediction""",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, """predict""")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 168 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    # No sentencepiece backend: expose a placeholder for slow_tokenizer_class.
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}

# Hub locations of the pretrained vocab / tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/mbart-large-en-ro""": (
            """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
        ),
        """facebook/mbart-large-cc25""": (
            """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
        ),
    },
    """tokenizer_file""": {
        """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
        """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
    },
}

# Maximum input length per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/mbart-large-en-ro""": 1_024,
    """facebook/mbart-large-cc25""": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class UpperCAmelCase_ ( PreTrainedTokenizerFast ):
    """Fast MBART tokenizer (backed by *tokenizers*), based on BPE.

    Differs from most tokenizers in its handling of language codes: source
    text is encoded as ``X [eos, src_lang_code]`` and target text as
    ``X [eos, tgt_lang_code]`` (no BOS prefix).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving a slow tokenizer needs the original sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. ``"en_XX"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Append the MBART suffix (``[eos, lang_code]``) to the token ids."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """MBART does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: Optional[str], src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seqaseq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory` (slow-tokenizer format)."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 4 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the total number of trainable (requires_grad) parameters in `model`."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
_snake_case : Dict = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that keeps the 3 best checkpoints by `val_<metric>`.

    Raises:
        NotImplementedError: for metrics other than rouge2 / bleu / em.
    """
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            ' function.'
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=F"""val_{metric}""",
        mode='max',
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback on `val_<metric>` ('min' mode for loss-like metrics)."""
    return EarlyStopping(
        monitor=F"""val_{metric}""",
        mode='min' if 'loss' in metric else 'max',
        patience=patience,
        verbose=True,
    )
class _UpperCAmelCase ( pl.Callback ):
    """Lightning callback that logs learning rates, metrics, and generations."""

    def on_batch_end(self, trainer, pl_module):
        # Log the learning rate of every optimizer param group.
        lrs = {F"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        """Write callback metrics (and optionally generations) to text files."""
        logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = F"""{key}: {val:.6f}\n"""
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 284 | 0 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __A :
    """Shared test harness for diffusers UNet down/mid/up blocks.

    Subclasses set `block_class` and `block_type` ("down" | "mid" | "up");
    this mixin builds dummy inputs and checks output shapes and training.
    """

    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        # Expected (batch, channels, height, width) per block type.
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            # Up blocks additionally need the previous stage's channel count.
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5E-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 361 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
    """Integration test: TF CamemBERT base model output on a fixed sentence."""

    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
| 106 | 0 |
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _snake_case ( snake_case__ : List[str] ) -> str:
    """Return a SHA-256 hex digest of the given source *lines*.

    Comments are stripped and empty lines dropped first, so the fingerprint
    only changes when meaningful code changes.
    """
    # Local import: the module-level `from hashlib import shaaaa` is bogus
    # (hashlib has no such member); `sha256` is what was intended.
    from hashlib import sha256

    filtered_lines = []
    for line in snake_case__:
        line = re.sub(r'#.*' , '' , line )  # remove comments (was applied to the whole list)
        if line:
            filtered_lines.append(line )
    full_str = '\n'.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8' )
    return sha256(full_bytes ).hexdigest()


# The module tables below call this helper by its upstream name.
_hash_python_lines = _snake_case
# get importable module names and hash for caching
# Fix: these tables were all assigned to a single clobbered name (`_lowercase`)
# while later statements read `_EXTENSION_TO_MODULE` / `_MODULE_TO_EXTENSIONS`.
_PACKAGED_DATASETS_MODULES = {
    '''csv''': (csv.__name__, _snake_case(inspect.getsource(csv).splitlines())),
    '''json''': (json.__name__, _snake_case(inspect.getsource(json).splitlines())),
    '''pandas''': (pandas.__name__, _snake_case(inspect.getsource(pandas).splitlines())),
    '''parquet''': (parquet.__name__, _snake_case(inspect.getsource(parquet).splitlines())),
    '''arrow''': (arrow.__name__, _snake_case(inspect.getsource(arrow).splitlines())),
    '''text''': (text.__name__, _snake_case(inspect.getsource(text).splitlines())),
    '''imagefolder''': (imagefolder.__name__, _snake_case(inspect.getsource(imagefolder).splitlines())),
    '''audiofolder''': (audiofolder.__name__, _snake_case(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    '''.csv''': ('''csv''', {}),
    '''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
    '''.json''': ('''json''', {}),
    '''.jsonl''': ('''json''', {}),
    '''.parquet''': ('''parquet''', {}),
    '''.arrow''': ('''arrow''', {}),
    '''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
# Modules whose loaders support per-file metadata sidecars.
_MODULE_SUPPORTS_METADATA = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
"""simple docstring"""
import math
import random
def UpperCAmelCase__ (value , deriv = False ):
    '''Logistic sigmoid of *value*, or its derivative form when *deriv* is True.

    When *deriv* is True, *value* is expected to already be a sigmoid output,
    so the derivative simplifies to ``value * (1 - value)``.
    '''
    # Fix: the original signature declared the same parameter name twice
    # (a SyntaxError) while the body read the undefined names `value`/`deriv`.
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))


# Upstream name used by the training loop and the __main__ block below.
sigmoid_function = UpperCAmelCase__
# Initial Value (learning-rate-like scale used by the training loop below).
# Fix: the bogus `Tuple` annotation referenced an un-imported name (NameError
# at module import), and the loop reads `INITIAL_VALUE`, which was never bound.
a__ = 0.02
INITIAL_VALUE = a__
def UpperCAmelCase__ (expected , number_propagations ):
    '''Train a single weight so the 1-neuron network predicts *expected* (0-100).

    Returns the final prediction scaled back to the 0-100 range. Uses a random
    initial weight, so results vary run to run unless `random` is seeded.
    '''
    # Fix: the original declared duplicate parameter names (SyntaxError) and
    # collapsed every local into one name while reading the intended names.
    # Random weight in the open interval (-1, 199], matching the original expression.
    weight = float(2 * (random.randint(1 , 100 )) - 1 )
    layer_a = 0.0  # defined up-front so a zero-iteration call still returns
    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta (sigmoid derivative form takes the activation itself)
        layer_1_delta = layer_1_error * sigmoid_function(layer_a , True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 100


# Upstream name used by the __main__ block below.
forward_propagation = UpperCAmelCase__
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: both inputs were assigned to the same clobbered name (`a__`) while
    # the call below read the undefined `expected` / `number_propagations`.
    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
    print(forward_propagation(expected, number_propagations))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: heavy backends (tokenizers / torch / TF) are only
# imported when their attributes are first accessed.
# Fix: the structure was bound to a throwaway name (`A`) and never grown,
# while `_LazyModule` below read the undefined `_import_structure`; the lazy
# module was also never installed into `sys.modules`.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class SCREAMING_SNAKE_CASE__ ( TestCasePlus ):
    """Smoke test: fine-tune a tiny BERT2BERT summarizer with Seq2SeqTrainer.

    Fixes: the base class was the undefined `UpperCAmelCase__` (the imported
    `TestCasePlus` is intended), and every local was collapsed into one name
    while later lines read the intended names (`bertabert`, `tokenizer`, ...).
    """

    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        '''Train + evaluate end-to-end on a 1% CNN/DailyMail slice.'''
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
        tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""" )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
        val_dataset = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=True , max_length=128 )
            batch["""input_ids"""] = inputs.input_ids
            batch["""attention_mask"""] = inputs.attention_mask
            batch["""decoder_input_ids"""] = outputs.input_ids
            batch["""labels"""] = outputs.input_ids.copy()
            # -100 masks pad tokens out of the loss
            batch["""labels"""] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
            ]
            batch["""decoder_attention_mask"""] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch

        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["""article""", """highlights"""] , )
        train_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["""article""", """highlights"""] , )
        val_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="""steps""" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
def __A ( __lowerCAmelCase )-> list:
    """Sort the list in place with circle sort and return it.

    Circle sort compares mirror-image pairs (first/last, second/second-last,
    ...), then recurses on both halves; full passes repeat until no swap
    occurs. Fixes: the swap targets had been collapsed into one throwaway
    name (so nothing was ever swapped) and the early return read an
    undefined name.
    """
    if len(__lowerCAmelCase ) < 2:
        return __lowerCAmelCase

    def circle_sort_util(collection , low , high ) -> bool:
        """One circle pass over collection[low:high]; True if anything moved."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        # odd-sized span: the centre element still has a right neighbour to check
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(__lowerCAmelCase , 0 , len(__lowerCAmelCase ) - 1 )
    return __lowerCAmelCase


# Upstream name used by the __main__ block below.
circle_sort = __A
if __name__ == "__main__":
    # Fix: both values were assigned to the same clobbered name (`_a`) while
    # the call below read the undefined `unsorted` / `circle_sort`.
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(circle_sort(unsorted))
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# Fix: all seven constants were assigned to one clobbered name
# (`__lowerCAmelCase`) while the docstring decorators below read
# `_CONFIG_FOR_DOC`, `_CHECKPOINT_FOR_DOC`, etc.

# General docstring
_CONFIG_FOR_DOC = '''RegNetConfig'''

# Base docstring
_CHECKPOINT_FOR_DOC = '''facebook/regnet-y-040'''
_EXPECTED_OUTPUT_SHAPE = [1, 10_88, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''facebook/regnet-y-040'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''facebook/regnet-y-040''',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class __a ( nn.Module ):
    """Conv2d -> BatchNorm2d -> activation, the basic RegNet building block.

    Fixes: duplicate parameter names (SyntaxError), nonexistent
    `nn.Convad`/`nn.BatchNormad` (Conv2d/BatchNorm2d intended), and the
    forward method renamed away from `forward` so `nn.Module.__call__` broke.
    """

    def __init__( self , in_channels , out_channels , kernel_size = 3 , stride = 1 , groups = 1 , activation = "relu" , ) -> None:
        super().__init__()
        # padding=kernel_size // 2 keeps the spatial size when stride == 1
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , groups=groups , bias=False , )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward( self , hidden_state ):
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state


# Name used throughout the rest of this module.
RegNetConvLayer = __a
class __a ( nn.Module ):
    """RegNet stem: a single stride-2 RegNetConvLayer over the raw pixels."""

    def __init__( self , config ) -> None:
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
        # remembered for the channel-count sanity check in forward()
        self.num_channels = config.num_channels

    def forward( self , pixel_values ):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        hidden_state = self.embedder(pixel_values )
        return hidden_state


RegNetEmbeddings = __a
class __a ( nn.Module ):
    """1x1 conv + batch norm used to downsample / re-project the residual path."""

    def __init__( self , in_channels , out_channels , stride = 2 ) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward( self , hidden_state ) -> Tensor:
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        return hidden_state


RegNetShortCut = __a
class __a ( nn.Module ):
    """Squeeze-and-Excitation gate: global-pool to 1x1, two 1x1 convs, sigmoid."""

    def __init__( self , in_channels , reduced_channels ) -> None:
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels , reduced_channels , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(reduced_channels , in_channels , kernel_size=1 ) , nn.Sigmoid() , )

    def forward( self , hidden_state ):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state )
        attention = self.attention(pooled )
        hidden_state = hidden_state * attention
        return hidden_state


RegNetSELayer = __a
class __a ( nn.Module ):
    """RegNet X residual layer: 1x1 -> grouped 3x3 -> 1x1 convs plus shortcut."""

    def __init__( self , config , in_channels , out_channels , stride = 1 ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        # final 1x1 conv has no activation: it is applied after the residual add
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACTaFN[config.hidden_act]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state


RegNetXLayer = __a
class __a ( nn.Module ):
    """RegNet Y residual layer: an X layer with a Squeeze-and-Excitation gate."""

    def __init__( self , config , in_channels , out_channels , stride = 1 ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        # SE gate squeezes to in_channels/4; final 1x1 conv has no activation
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACTaFN[config.hidden_act]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state


RegNetYLayer = __a
class __a ( nn.Module ):
    """One RegNet stage: `depth` X- or Y-layers; the first layer downsamples."""

    def __init__( self , config , in_channels , out_channels , stride = 2 , depth = 2 , ) -> None:
        super().__init__()
        layer = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config , in_channels , out_channels , stride=stride , ) , *[layer(config , out_channels , out_channels ) for _ in range(depth - 1 )] , )

    def forward( self , hidden_state ):
        hidden_state = self.layers(hidden_state )
        return hidden_state


RegNetStage = __a
class __a ( nn.Module ):
    """Stack of RegNetStages; optionally collects per-stage hidden states."""

    def __init__( self , config ) -> None:
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth ) )

    def forward( self , hidden_state , output_hidden_states = False , return_dict = True ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )


RegNetEncoder = __a
class __a ( PreTrainedModel ):
    """Config / weight-init plumbing shared by all RegNet models.

    Fixes: the base class was the undefined `__UpperCamelCase` (the imported
    `PreTrainedModel` is intended), the class attributes were all assigned to
    one clobbered name, and the hook methods had been renamed away from the
    `_init_weights` / `_set_gradient_checkpointing` names the base class calls.
    """

    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights( self , module ):
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , RegNetModel ):
            module.gradient_checkpointing = value


RegNetPreTrainedModel = __a
# Fix: both docstrings were assigned to the same clobbered name
# (`__lowerCAmelCase`) while the decorators below need them separately.
REGNET_START_DOCSTRING = r'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

REGNET_INPUTS_DOCSTRING = r'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __a ( RegNetPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embedder = RegNetEmbeddings(config )
        self.encoder = RegNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values , output_hidden_states = None , return_dict = None ) -> BaseModelOutputWithPoolingAndNoAttention:
        """Run the embedder + encoder and pool the last hidden state to 1x1."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )


RegNetModel = __a
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __a ( RegNetPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ) -> ImageClassifierOutputWithNoAttention:
        """Classify pooled RegNet features; computes a loss when `labels` given."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            # infer the problem type once from label dtype / label count
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )


RegNetForImageClassification = __a
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class UpperCamelCase ( TestCase ):
    """Tests for RagTokenizer (DPR question encoder + BART generator).

    Fixes: the base class was the undefined `_UpperCAmelCase` (the imported
    `TestCase` is intended); all five methods were named `__A` so they
    shadowed one another and unittest could not discover the tests; the
    mangled locals are restored from the names the code actually reads
    (`self.vocab_file`, `self.merges_file`, ...).
    """

    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , "dpr_tokenizer" )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname , "bart_tokenizer" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )

    def get_dpr_tokenizer( self ):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )

    def get_bart_tokenizer( self ):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config( self ):
        save_dir = os.path.join(self.tmpdirname , "rag_tokenizer" )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )

    @slow
    def test_pretrained_token_nq_tokenizer( self ):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq" )
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )

    @slow
    def test_pretrained_sequence_nq_tokenizer( self ):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCamelCase ( token , num_runs=7 ):
    """Return the latest ``num_runs`` scheduled daily-CI workflow runs on ``main``.

    Args:
        token: optional GitHub access token; when given, sent as a Bearer header.
        num_runs: number of runs to request from the GitHub API (default 7).

    Returns:
        The ``workflow_runs`` list from the GitHub Actions REST response.

    Bug fix: the original signature declared the same parameter name twice
    (a SyntaxError) and the body referenced undefined ``token``/``num_runs``.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def UpperCamelCase ( token ):
    """Return the id of the most recent *completed* daily-CI workflow run.

    Bug fix: the original bound the run list to a throwaway local and then
    iterated an undefined name ``workflow_runs``.

    NOTE(review): ``get_daily_ci_runs`` is the intended name of the sibling
    helper defined above (renamed ``UpperCamelCase`` in this revision).
    """
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        # Runs are returned most-recent first; take the first finished one.
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def UpperCamelCase ( artifact_names , output_dir , token ):
    """Download the named artifacts of the last completed daily-CI run.

    Bug fix: the original declared three parameters all named ``_A``
    (a SyntaxError) while the body used ``artifact_names``/``output_dir``/``token``.

    NOTE(review): ``worflow_run_id`` (sic) is the keyword actually exposed by
    ``get_artifacts_links`` upstream — the typo is intentional here.
    NOTE(review): ``get_last_daily_ci_runs`` refers to the sibling defined
    above (renamed ``UpperCamelCase`` in this revision).
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def UpperCamelCase ( artifact_names , output_dir , token ):
    """Download the last daily-CI artifacts and return their text contents.

    Returns:
        dict mapping artifact name -> {member filename -> decoded file text}.

    Bug fix: the original declared three parameters all named ``_A``
    (a SyntaxError) and discarded every intermediate value into one local.

    NOTE(review): ``get_last_daily_ci_artifacts`` refers to the sibling
    defined above (renamed ``UpperCamelCase`` in this revision).
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 198 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
# Module-level constants for the Funnel Transformer fast tokenizer.
# NOTE(review): every constant below is bound to the same name
# ``lowerCAmelCase_`` — each assignment shadows the previous one, and the
# later references (VOCAB_FILES_NAMES, _model_names, …) are undefined in this
# revision. Documented as-is; renaming requires a file-wide fix.
lowerCAmelCase_ = logging.get_logger(__name__)
# Logical vocabulary file names expected on disk / on the Hub.
lowerCAmelCase_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
# All released Funnel Transformer size variants.
lowerCAmelCase_ = [
    'small',
    'small-base',
    'medium',
    'medium-base',
    'intermediate',
    'intermediate-base',
    'large',
    'large-base',
    'xlarge',
    'xlarge-base',
]
# Hub download URLs for the slow-tokenizer vocab and the fast tokenizer.json.
lowerCAmelCase_ = {
    'vocab_file': {
        'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
        'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
        'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
        'funnel-transformer/medium-base': (
            'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
        ),
        'funnel-transformer/intermediate': (
            'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
        ),
        'funnel-transformer/intermediate-base': (
            'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
        ),
        'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
        'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
        'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
        'funnel-transformer/xlarge-base': (
            'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
        'funnel-transformer/small-base': (
            'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
        ),
        'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
        'funnel-transformer/medium-base': (
            'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
        ),
        'funnel-transformer/intermediate': (
            'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
        ),
        'funnel-transformer/intermediate-base': (
            'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
        ),
        'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
        'funnel-transformer/large-base': (
            'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
        ),
        'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
        'funnel-transformer/xlarge-base': (
            'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
        ),
    },
}
# Max input length (positional embeddings) per checkpoint.
# NOTE(review): ``_model_names`` is undefined here — the variant list above
# was bound to ``lowerCAmelCase_`` instead.
lowerCAmelCase_ = {F'''funnel-transformer/{name}''': 512 for name in _model_names}
# Per-checkpoint init overrides (Funnel tokenizers lowercase by default).
lowerCAmelCase_ = {F'''funnel-transformer/{name}''': {'do_lower_case': True} for name in _model_names}
class __A ( A_ ):
    """Fast (Rust-backed) tokenizer for Funnel Transformer.

    NOTE(review): restored the conventional ``PreTrainedTokenizerFast`` hook
    names. The previous revision bound every class attribute to one obfuscated
    name (each binding shadowing the last), declared every method parameter
    with the same name (a SyntaxError), and gave all three methods the same
    name so only the final definition survived.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Funnel assigns token-type id 2 to the [CLS] token.
    cls_token_type_id = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )
        # Rebuild the backend normalizer when the serialized tokenizer.json
        # disagrees with the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return ``[CLS] A [SEP]`` (plus ``B [SEP]`` when a pair is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: cls-id for [CLS], 0 for sequence A, 1 for sequence B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the backend model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 16 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class A_ ( unittest.TestCase ):
    """Builds a small random ``AlbertConfig`` plus matching dummy inputs for the
    Flax ALBERT model tests.

    NOTE(review): the previous revision declared every ``__init__`` parameter
    with the same name (a SyntaxError), gave both methods the same name (the
    second shadowed the first, breaking ``self.prepare_config_and_inputs()``),
    and unpacked the config tuple into one repeated local. Restored.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # NOTE(review): was an undefined name; encoder-only is the intent — confirm
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Shape the inputs as the common Flax model-tester mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class A_ ( snake_case__ , unittest.TestCase ):
    """Common-mixin test suite over all Flax ALBERT head classes.

    NOTE(review): restored ``setUp`` (the previous revision named it the same
    as the @slow test, so it was shadowed and the tester was never created),
    stored the tester on ``self`` instead of a discarded local, and removed a
    duplicated ``FlaxAlbertForQuestionAnswering`` entry.
    """

    _lowercase : Dict = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # NOTE(review): `FlaxAlbertModelTester` is the intended name of the
        # tester class defined above (renamed `A_` in this revision).
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def UpperCAmelCase(self):
        """Every head class must load 'albert-base-v2' and run a forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class A_ ( unittest.TestCase ):
    """Integration test pinning the hidden states of 'albert-base-v2'."""

    @slow
    def UpperCAmelCase(self):
        # Bug fix: the previous revision bound every local to one name and then
        # called the model with the undefined name `UpperCAmelCase`.
        model = FlaxAlbertModel.from_pretrained('albert-base-v2')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        # Reference slice captured from a known-good run.
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 322 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowercase__ ( lowercase_ ) -> bool:
    """Return True when *lowercase_* is a ``torch.compile``-wrapped module.

    Bug fix: the ``hasattr`` guard probed the *argument* for ``_dynamo``;
    it must probe ``torch`` itself (pre-2.0 builds lack ``torch._dynamo``),
    otherwise the check always returned False for real modules.
    """
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(lowercase_, torch._dynamo.eval_frame.OptimizedModule)
def lowercase__ ( model , keep_fp32_wrapper = True ):
    """Unwrap *model* from DDP/DataParallel/DeepSpeed/compile wrappers.

    Args:
        model: the possibly-wrapped module.
        keep_fp32_wrapper: when False, also strip the autocast forward wrapper
            and undo any transformer-engine conversion.

    Returns:
        The innermost module (re-attached to the compiled wrapper if any).

    Bug fix: the original declared both parameters with the same name
    (a SyntaxError) and discarded ``options``/``forward``/``compiled_model``
    into one repeated local, leaving later references undefined.
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        # Remember the compile wrapper so it can be re-attached at the end.
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # Walk back down the decorator chain until the pristine forward.
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def lowercase__ ( ) -> None:
    """Synchronization barrier: block until every process reaches this point
    (delegates to ``PartialState``)."""
    state = PartialState()
    state.wait_for_everyone()
def lowercase__ ( obj , f ):
    """Save *obj* to *f* on the main process only (via ``xm.save`` on TPU).

    Bug fix: the original declared both parameters with the same name,
    which is a SyntaxError in Python.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def lowercase__ ( **kwargs ):
    """Temporarily set upper-cased environment variables, removing them on exit.

    Bug fix: the loop converted each value to ``str`` but never wrote it to
    ``os.environ`` (the assignment target was lost), so the patch was a no-op.
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def lowercase__ ( lowercase_ ):
    """Best-effort human-readable name for *lowercase_*.

    Prefers ``__qualname__``, then ``__name__``; for plain instances falls
    back to the class, and finally to ``str()``.
    """
    target = lowercase_
    if not (hasattr(target, "__qualname__") or hasattr(target, "__name__")):
        target = getattr(target, "__class__", target)
    for attr in ("__qualname__", "__name__"):
        if hasattr(target, attr):
            return getattr(target, attr)
    return str(target)
def lowercase__ ( source , destination ):
    """Recursively merge *source* into *destination* (in place) and return it.

    Nested dicts are merged key-by-key; scalar values from *source* overwrite.

    Bug fix: the original declared both parameters with the same name
    (a SyntaxError) and recursed through an undefined ``merge_dicts`` name.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            lowercase__(value, node)
        else:
            destination[key] = value
    return destination
def lowercase__ ( port = None ) -> bool:
    """Return True when something is listening on ``localhost:port``.

    Defaults to 29500, the conventional torch distributed rendezvous port.

    Bug fix: the original assigned the default to a throwaway local and then
    connected to an undefined name ``port``.
    """
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 363 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase__ ( lowercase_ ) -> int:
    """Möbius function: 0 for a squared factor, else ±1 by factor-count parity."""
    factors = prime_factors(lowercase_)
    if not is_square_free(factors):
        return 0
    return -1 if len(factors) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 310 | 0 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class UpperCAmelCase_ ( a ):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library.

    NOTE(review): the previous revision declared every method parameter with
    the same name ``__a`` (a SyntaxError), assigned the normalizer /
    pre-tokenizer / decoder / post-processor to throwaway locals instead of
    the ``Tokenizer`` instance, and named all four methods ``snake_case__``
    so only the last survived. Restored the conventional ``BaseTokenizer``
    method names.
    """

    def __init__(
        self,
        replacement = "▁",
        add_prefix_space = True,
        unk_token = "<unk>",
        eos_token = "</s>",
        pad_token = "<pad>",
    ):
        # Fixed special-token ids expected by the trainers below.
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(self, files, vocab_size = 8000, show_progress = True):
        """Train the Unigram model from one file path or a list of paths."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator, vocab_size = 8000, show_progress = True):
        """Train the Unigram model from an iterator of text."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        """Point the trained model's unk id at the reserved <unk> slot."""
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
def snake_case ( snake_case__ :str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Return True when the string contains all 26 ASCII letters (set-based).

    Bug fix: the original bound the accumulator set and the stripped text to
    the same throwaway local, then referenced the undefined names
    ``frequency`` and ``input_str``.
    """
    frequency = set()
    # Whitespace never counts toward the alphabet.
    text = snake_case__.replace(""" """ , """""")
    for alpha in text:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def snake_case ( snake_case__ :str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Return True when the string contains all 26 ASCII letters (flag array).

    Bug fix: the original overwrote one throwaway local instead of setting the
    per-letter slot in the flag list, so the result was never computed.
    """
    flag = [False] * 26
    for char in snake_case__:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def snake_case ( snake_case__ :str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Return True when the string contains all 26 ASCII letters."""
    letters = set()
    for char in snake_case__.lower():
        if char.isalpha():
            letters.add(char)
    return len(letters) == 26
def snake_case ( ) -> None:
    """Benchmark the three pangram checkers with ``timeit``.

    NOTE(review): broken in this revision — the setup string is bound to a
    throwaway local while ``setup=snake_case__`` references an undefined name,
    and the names imported by the setup string (``is_pangram`` …) do not exist
    in this module (all three checkers are defined as ``snake_case``).
    """
    from timeit import timeit

    _A = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""" , setup=snake_case__))
    print(timeit("""is_pangram_faster()""" , setup=snake_case__))
    print(timeit("""is_pangram_fastest()""" , setup=snake_case__))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()  # NOTE(review): `benchmark` is undefined — the function above is named `snake_case`
| 180 | 0 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowerCAmelCase__ ( ) -> tuple[list[int], int]:
    """Return (10 random ints in [-1000, 1000], random target in [-5000, 5000])."""
    arr = [randint(-1_0_0_0, 1_0_0_0) for _ in range(1_0)]
    target = randint(-5_0_0_0, 5_0_0_0)
    return (arr, target)


# Bug fix: the previous revision called the undefined name `make_dataset`;
# the generator above is defined as `lowerCAmelCase__` in this revision.
SCREAMING_SNAKE_CASE__ = lowerCAmelCase__()
def lowerCAmelCase__ ( arr , target ) -> tuple[int, ...]:
    """Brute-force triplet sum: first sorted triplet of *arr* summing to *target*,
    or (0, 0, 0) when none exists. O(n^3) via permutations.

    Bug fix: the original declared both parameters with the same name, a
    SyntaxError.
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def lowerCAmelCase__ ( arr , target ) -> tuple[int, int, int]:
    """Two-pointer triplet sum over the sorted array; O(n^2), mutates *arr*.

    Bug fix: the original declared both parameters with the same name
    (a SyntaxError) and bound the two pointers to one repeated local, leaving
    ``left``/``right`` undefined.
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def lowerCAmelCase__ ( ) -> tuple[float, float]:
    """Time both triplet-sum implementations and return their best runs.

    NOTE(review): broken in this revision — every local is rebound to one
    throwaway name while ``repeat(setup=..., stmt=...)`` references the
    undefined names ``_UpperCamelCase``, and the setup string imports
    ``dataset``/``triplet_sum1``/``triplet_sum2`` which do not exist under
    those names in this module.
    """
    snake_case = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
    snake_case = '\ntriplet_sum1(*dataset)\n'
    snake_case = '\ntriplet_sum2(*dataset)\n'
    snake_case = repeat(setup=_UpperCamelCase , stmt=_UpperCamelCase , repeat=5 , number=1_0_0_0_0 )
    snake_case = repeat(setup=_UpperCamelCase , stmt=_UpperCamelCase , repeat=5 , number=1_0_0_0_0 )
    return (min(_UpperCamelCase ), min(_UpperCamelCase ))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    SCREAMING_SNAKE_CASE__ = solution_times()  # NOTE(review): `solution_times` is undefined here
    print(f"""The time for naive implementation is {times[0]}.""")  # NOTE(review): `times` is undefined
    print(f"""The time for optimized implementation is {times[1]}.""")
| 149 | """simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCAmelCase__ ( key , default=False ):
    """Read boolean flag *key* from the environment.

    Returns *default* unchanged when the variable is unset; otherwise parses
    it with ``strtobool`` (yes/no style) and raises ValueError for anything else.

    Bug fix: the original declared both parameters with the same name
    (a SyntaxError) and returned the undefined name ``_value``.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""")
    return _value


# NOTE(review): the previous revision called the undefined name
# `parse_flag_from_env`; the helper above is `lowerCAmelCase__` here.
SCREAMING_SNAKE_CASE__ = lowerCAmelCase__("RUN_SLOW", default=False)
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> str:
    """Decorator: skip the test unconditionally.

    NOTE(review): every helper in this section is bound to the same name
    ``lowerCAmelCase__`` — each definition shadows the previous one, so the
    decorator names used elsewhere (``slow``, ``require_cpu``, …) do not exist
    in this revision.
    """
    return unittest.skip('Test was skipped' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> List[str]:
    """Decorator: run only when slow tests are enabled (RUN_SLOW flag).

    NOTE(review): ``_run_slow_tests`` is undefined — the parsed flag above was
    bound to ``SCREAMING_SNAKE_CASE__``.
    """
    return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> int:
    """Decorator: run only on CPU-only machines."""
    return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : Tuple ) -> Tuple:
    """Decorator: require a CUDA GPU."""
    return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : Dict ) -> Optional[int]:
    """Decorator: require an XPU device."""
    return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : Dict ) -> Any:
    """Decorator: require the torch `mps` backend."""
    return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : Tuple ) -> str:
    """Decorator: require both `transformers` and `datasets`."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> Dict:
    """Decorator: require bitsandbytes."""
    return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> List[Any]:
    """Decorator: require a TPU."""
    return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : Dict ) -> int:
    """Decorator: require exactly one CUDA GPU."""
    return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : str ) -> List[Any]:
    """Decorator: require exactly one XPU."""
    return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : Optional[Any] ) -> List[Any]:
    """Decorator: require multiple CUDA GPUs."""
    return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : str ) -> List[str]:
    """Decorator: require multiple XPUs."""
    return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : str ) -> str:
    """Decorator: require safetensors."""
    return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : Dict ) -> List[str]:
    """Decorator: require DeepSpeed."""
    return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : int ) -> List[Any]:
    """Decorator: require torch >= 1.12.0."""
    return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : Dict=None , _UpperCamelCase : Dict=None ) -> int:
    """Parameterizable decorator: require torch >= *version*.

    NOTE(review): both parameters share the same name ``_UpperCamelCase``,
    which is a SyntaxError in Python; the intended signature is
    ``(test_case=None, version=None)``.
    """
    if test_case is None:
        return partial(_UpperCamelCase , version=_UpperCamelCase )
    return unittest.skipUnless(is_torch_version('>=' , _UpperCamelCase ) , f"""test requires torch version >= {version}""" )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] ) -> List[str]:
    """Decorator: require TensorBoard."""
    return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : int ) -> int:
    """Decorator: require wandb."""
    return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCamelCase )


def lowerCAmelCase__ ( _UpperCamelCase : int ) -> Any:
    """Decorator: require comet_ml."""
    return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCamelCase )


# True when at least one non-comet experiment tracker is installed.
SCREAMING_SNAKE_CASE__ = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def lowerCAmelCase__ ( _UpperCamelCase : str ) -> List[str]:
    """Decorator: require at least one tracker and no comet_ml.

    NOTE(review): ``_atleast_one_tracker_available`` is undefined — the flag
    above was bound to ``SCREAMING_SNAKE_CASE__``.
    """
    return unittest.skipUnless(
        _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCamelCase )
class lowerCAmelCase_ ( unittest.TestCase ):
    """TestCase providing a class-level temporary directory, cleared per test.

    NOTE(review): broken in this revision — the hook methods are named
    ``snake_case`` instead of ``setUpClass``/``tearDownClass``/``setUp`` (so
    unittest never calls them, and the two classmethods shadow each other),
    the created tmpdir is assigned to a local instead of ``cls.tmpdir``, and
    ``self.clear_on_setup`` below is never defined (the flag was bound to
    ``_lowerCAmelCase``).
    """

    # Intended to be the `clear_on_setup` switch — see NOTE above.
    _lowerCAmelCase : Union[str, Any] = True

    @classmethod
    def snake_case ( cls ):
        """Create the shared temporary directory (intended setUpClass)."""
        snake_case = tempfile.mkdtemp()

    @classmethod
    def snake_case ( cls ):
        """Remove the shared temporary directory (intended tearDownClass)."""
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )

    def snake_case ( self ):
        """Empty the tmpdir before each test when clearing is enabled (intended setUp)."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('**/*' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(lowerCAmelCase )
class lowerCAmelCase_ ( unittest.TestCase ):
    """TestCase that resets the accelerate state singletons after each test.

    NOTE(review): the hook is named ``snake_case`` instead of ``tearDown``,
    so unittest never invokes it in this revision.
    """

    def snake_case ( self ):
        """Reset AcceleratorState/PartialState singletons (intended tearDown)."""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowerCAmelCase_ ( unittest.TestCase ):
    """TestCase helper that starts a set of mocks and stops them on cleanup.

    NOTE(review): broken in this revision — the normalized mock list is
    assigned to a local (``snake_case``) instead of ``self.mocks``, so the
    loop below reads an attribute that was never set.
    """

    def snake_case ( self , lowerCAmelCase ):
        """Start each given mock and register its stop() as cleanup."""
        snake_case = mocks if isinstance(lowerCAmelCase , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def lowerCAmelCase__ ( tensor ):
    """Return True when *tensor* holds the same values on every process.

    Gathers the tensor across processes and compares each gathered copy with
    the local one.

    Bug fix: the original bound every intermediate to one throwaway local and
    then read the undefined names ``state``/``tensors``/``tensor``.
    """
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class lowerCAmelCase_ :
    """Container for the result of a streamed subprocess run.

    Bug fix: the original ``__init__`` assigned all three arguments to one
    throwaway local, so ``returncode``/``stdout``/``stderr`` were never set
    on the instance (later code reads ``result.returncode`` etc.).
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode  # process exit status
        self.stdout = stdout          # list of decoded stdout lines
        self.stderr = stderr          # list of decoded stderr lines
async def lowerCAmelCase__ ( stream , callback ):
    """Read *stream* line by line, invoking *callback* per line, until EOF.

    Bug fix: the original declared both parameters with the same name,
    which is a SyntaxError in Python.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def lowerCAmelCase__ ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    """Run *cmd* asynchronously, streaming stdout/stderr line by line.

    Bug fix: the original declared several parameters with the same name
    (a SyntaxError) and discarded the process handle and output buffers into
    one repeated local.

    NOTE(review): `_read_stream` is the intended name of the async line
    reader defined above (renamed `lowerCAmelCase__` in this revision).
    """
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Decode, buffer, and optionally echo each line with its origin label.
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:'))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def lowerCAmelCase__ ( cmd , env=None , stdin=None , timeout=1_8_0 , quiet=False , echo=True ) -> _RunOutput:
    """Run *cmd* via the async streamer and raise on non-zero exit.

    Bug fix: the original declared several parameters with the same name,
    which is a SyntaxError in Python.

    NOTE(review): `_stream_subprocess` is the intended name of the coroutine
    defined above (renamed `lowerCAmelCase__` in this revision).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""")
    return result
class lowerCAmelCase_ ( Exception ):
    """Raised when a synchronous subprocess invocation fails.

    Bug fix: the original subclassed the undefined name ``lowerCAmelCase``;
    an exception class must derive from ``Exception``.
    """

    pass
def run_command(command, return_stdout=False):
    """Run ``command`` via ``subprocess.check_output`` and optionally return stdout.

    Args:
        command: command and arguments as a list of strings.
        return_stdout: when True, return the captured stdout (decoded to
            ``str`` when the output object supports ``decode``).

    Returns:
        The decoded stdout when ``return_stdout`` is True, otherwise ``None``.

    Raises:
        SubprocessCallException: if the command exits with a non-zero code;
            the captured output is included in the message.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 149 | 1 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy ``weight`` (and optionally ``bias``) into one torch layer.

    Shapes are asserted against the layer's existing parameters before the
    tensors are wrapped in ``nn.Parameter`` and assigned.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH-attention weights (query_key, value, output dense) into
    the corresponding torch attention layer for 1-to-1 comparison."""
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    # trax stores (num_heads, ..., head_dim); fold the head axes into hidden_size
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local-attention weights (query, key, value, output dense)
    into the corresponding torch attention layer for 1-to-1 comparison."""
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block (layernorms, attention, feed-forward)
    into the matching torch block."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output: LSH attention ships 3 weight groups, local ships 4
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward wraps the actual weights one level deeper
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load a full trax Reformer checkpoint (embeddings, every block, final
    layernorm and LM head) into ``torch_model``."""
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    # axial position embeddings arrive as a tuple of factorized weights
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    # trax stores 4 weight groups per layer
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a pickled trax Reformer checkpoint into a PyTorch state dict.

    Args:
        trax_model_pkl_path: path to the pickled trax checkpoint.
        config_file: JSON file describing the Reformer architecture.
        pytorch_dump_path: where to ``torch.save`` the converted state dict.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # NOTE: the parser was previously bound to a mangled name while every
    # add_argument call referenced `parser`, which raised NameError; bind
    # the parser and parsed args consistently.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 4 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an original OpenAI GPT (numpy) checkpoint into PyTorch files.

    Args:
        openai_checkpoint_folder_path: folder containing the numpy checkpoint.
        openai_config_file: JSON config path; an empty string means defaults.
        pytorch_dump_folder_path: folder that receives the weights and config.
    """
    # Construct model: an empty config path means "use the default config"
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # NOTE: the parser was previously bound to a mangled name while the
    # add_argument calls referenced `parser` (NameError); bind it directly.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| 102 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A : Any = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __A :
    """Helper that builds a tiny Pegasus config plus matching encoder/decoder
    inputs, and checks incremental (cached) decoding against a full forward
    pass.

    NOTE(review): identifiers here look machine-mangled — the method
    signatures repeat ``UpperCAmelCase_`` (invalid Python) and the attribute
    targets collapsed to bare ``lowerCAmelCase`` names. The structure matches
    an upstream ``FlaxPegasusModelTester``; restore the original names before
    use.
    """

    # class-level knobs read by the common Flax test mixin
    lowerCAmelCase_ : List[Any] = PegasusConfig
    lowerCAmelCase_ : Optional[int] = {}
    lowerCAmelCase_ : int = "gelu"

    def __init__( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any]=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Tuple=99 , UpperCAmelCase_ : Optional[Any]=32 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=1 , UpperCAmelCase_ : Tuple=0 , ):
        # store tiny-model hyper-parameters consumed by the method below
        # (targets presumably were self.parent, self.batch_size, ... — TODO confirm)
        lowerCAmelCase : Optional[Any] = parent
        lowerCAmelCase : Union[str, Any] = batch_size
        lowerCAmelCase : List[Any] = seq_length
        lowerCAmelCase : Tuple = is_training
        lowerCAmelCase : Dict = use_labels
        lowerCAmelCase : List[str] = vocab_size
        lowerCAmelCase : List[str] = hidden_size
        lowerCAmelCase : str = num_hidden_layers
        lowerCAmelCase : Any = num_attention_heads
        lowerCAmelCase : Optional[Any] = intermediate_size
        lowerCAmelCase : Dict = hidden_dropout_prob
        lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
        lowerCAmelCase : List[str] = max_position_embeddings
        lowerCAmelCase : Optional[Any] = eos_token_id
        lowerCAmelCase : List[str] = pad_token_id
        lowerCAmelCase : Optional[int] = bos_token_id

    def lowercase__ ( self : Optional[int] ):
        # build random input ids ending in EOS, decoder inputs, and the config
        lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        lowerCAmelCase : int = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        lowerCAmelCase : int = np.concatenate([input_ids, eos_tensor] , axis=1 )
        lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase : Optional[int] = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        lowerCAmelCase : Tuple = prepare_pegasus_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        return config, inputs_dict

    def lowercase__ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ):
        # check cached (incremental) decoding agrees with a full forward pass
        lowerCAmelCase : Union[str, Any] = 20
        lowerCAmelCase : Tuple = model_class_name(UpperCAmelCase_ )
        lowerCAmelCase : Tuple = model.encode(inputs_dict['input_ids'] )
        lowerCAmelCase : Dict = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        lowerCAmelCase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_ )
        lowerCAmelCase : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        lowerCAmelCase : List[Any] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        # decode all positions but the last using the cache ...
        lowerCAmelCase : int = model.decode(
            decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
        lowerCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        # ... then decode the final position from the populated cache
        lowerCAmelCase : List[str] = model.decode(
            decoder_input_ids[:, -1:] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase_ , )
        lowerCAmelCase : List[Any] = model.decode(UpperCAmelCase_ , UpperCAmelCase_ )
        lowerCAmelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        # cached and uncached logits must agree to within 1e-3
        self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )

    def lowercase__ ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] ):
        # same check as above but with an explicit zero-padded decoder mask
        lowerCAmelCase : Tuple = 20
        lowerCAmelCase : Tuple = model_class_name(UpperCAmelCase_ )
        lowerCAmelCase : Any = model.encode(inputs_dict['input_ids'] )
        lowerCAmelCase : List[str] = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        lowerCAmelCase : Union[str, Any] = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        lowerCAmelCase : str = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_ )
        lowerCAmelCase : str = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        lowerCAmelCase : Union[str, Any] = model.decode(
            decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
        lowerCAmelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        lowerCAmelCase : Dict = model.decode(
            decoder_input_ids[:, -1:] , UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
        lowerCAmelCase : int = model.decode(UpperCAmelCase_ , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ )
        lowerCAmelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Build the model-input dict for Pegasus, deriving any missing masks.

    Args:
        config: model config; only ``pad_token_id`` is read here.
        input_ids: encoder token ids, shape (batch, src_len).
        decoder_input_ids: decoder token ids, shape (batch, tgt_len).
        attention_mask: optional encoder mask; defaults to 1 wherever
            ``input_ids`` is not the pad token.
        decoder_attention_mask: optional decoder mask; defaults to 1 for the
            first position (decoder start token) and the pad-mask elsewhere.

    Returns:
        Dict with ``input_ids``, ``decoder_input_ids``, ``attention_mask``
        and ``decoder_attention_mask``.
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                # the first decoder token is always attended to
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class __A ( lowerCAmelCase , unittest.TestCase ):
    """Flax Pegasus test suite: cache-forward checks, JIT shape equality for
    encode/decode, pretrained smoke test and a slow summarization
    integration test.

    NOTE(review): identifiers look machine-mangled — repeated
    ``UpperCAmelCase_`` parameters make several signatures invalid Python,
    and many assignment targets collapsed to bare ``lowerCAmelCase``.
    Restore upstream names before running.
    """

    # model classes exercised by the common mixin
    lowerCAmelCase_ : Optional[Any] = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    lowerCAmelCase_ : Dict = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    lowerCAmelCase_ : Optional[Any] = True
    lowerCAmelCase_ : List[str] = False
    lowerCAmelCase_ : Union[str, Any] = False
    lowerCAmelCase_ : Union[str, Any] = False

    def lowercase__ ( self : Optional[int] ):
        # build the shared model tester and config tester fixtures
        lowerCAmelCase : Optional[int] = FlaxPegasusModelTester(self )
        lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCAmelCase_ )

    def lowercase__ ( self : Optional[Any] ):
        self.config_tester.run_common_tests()

    def lowercase__ ( self : Optional[Any] ):
        # cached decoding must match the full forward pass for every class
        lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )

    def lowercase__ ( self : Optional[Any] ):
        # same, with an explicit decoder attention mask
        lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )

    def lowercase__ ( self : List[str] ):
        # encode must yield identical output shapes with and without JIT
        lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCAmelCase : Any = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
                lowerCAmelCase : int = model_class(UpperCAmelCase_ )

                @jax.jit
                def encode_jitted(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Any ):
                    return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )

                with self.subTest('JIT Enabled' ):
                    lowerCAmelCase : Optional[Any] = encode_jitted(**UpperCAmelCase_ ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        lowerCAmelCase : Tuple = encode_jitted(**UpperCAmelCase_ ).to_tuple()
                self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
                for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def lowercase__ ( self : List[str] ):
        # decode must yield identical output shapes with and without JIT
        lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCAmelCase : Dict = model_class(UpperCAmelCase_ )
                lowerCAmelCase : Tuple = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                lowerCAmelCase : Union[str, Any] = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
                    return model.decode(
                        decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , )

                with self.subTest('JIT Enabled' ):
                    lowerCAmelCase : Optional[int] = decode_jitted(**UpperCAmelCase_ ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        lowerCAmelCase : Optional[Any] = decode_jitted(**UpperCAmelCase_ ).to_tuple()
                self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
                for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def lowercase__ ( self : Tuple ):
        # smoke test: load pretrained weights and run one forward pass
        for model_class_name in self.all_model_classes:
            lowerCAmelCase : Any = model_class_name.from_pretrained('google/pegasus-large' , from_pt=UpperCAmelCase_ )
            lowerCAmelCase : List[Any] = np.ones((1, 1) )
            lowerCAmelCase : Dict = model(UpperCAmelCase_ )
            self.assertIsNotNone(UpperCAmelCase_ )

    @slow
    def lowercase__ ( self : Optional[Any] ):
        # integration test: generated summaries must match reference strings
        lowerCAmelCase : str = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
        lowerCAmelCase : int = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
        lowerCAmelCase : Optional[int] = [
            ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
            ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
        ]
        lowerCAmelCase : List[str] = [
            'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
            'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
        ]
        lowerCAmelCase : List[Any] = tokenizer(UpperCAmelCase_ , return_tensors='np' , truncation=UpperCAmelCase_ , max_length=512 , padding=UpperCAmelCase_ )
        lowerCAmelCase : Optional[int] = model.generate(**UpperCAmelCase_ , num_beams=2 ).sequences
        lowerCAmelCase : List[str] = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
        assert tgt_text == decoded
| 362 |
from manim import *
class __A ( lowerCAmelCase ):
    """Manim scene that animates layer-by-layer offloaded inference: an input
    square travels across the model blocks while each layer's weights are
    moved CPU -> GPU and back, with Circumscribe highlights.

    NOTE(review): this block is machine-mangled — nearly every positional
    constant and color (manim's DOWN/RIGHT/LEFT, YELLOW/BLUE/ORANGE, ...)
    collapsed to ``UpperCAmelCase_``, which is undefined here. The structure
    matches the accelerate "big model inference" animation; restore the
    original constants before rendering.
    """

    def lowercase__ ( self : Union[str, Any] ):
        # --- build the CPU / GPU / Model / Disk memory diagrams -------------
        lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        lowerCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
        lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
        lowerCAmelCase : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
        lowerCAmelCase : int = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
        lowerCAmelCase : str = Text('CPU' , font_size=24 )
        lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(UpperCAmelCase_ )
        lowerCAmelCase : int = [mem.copy() for i in range(4 )]
        lowerCAmelCase : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
        lowerCAmelCase : int = Text('GPU' , font_size=24 )
        lowerCAmelCase : Tuple = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
        gpu.move_to([-1, -1, 0] )
        self.add(UpperCAmelCase_ )
        lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
        lowerCAmelCase : List[str] = Text('Model' , font_size=24 )
        lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
        model.move_to([3, -1.0, 0] )
        self.add(UpperCAmelCase_ )
        # filled targets: one per model block, mirrored on the CPU column
        lowerCAmelCase : Any = []
        lowerCAmelCase : Dict = []
        for i, rect in enumerate(UpperCAmelCase_ ):
            lowerCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 )
            target.move_to(UpperCAmelCase_ )
            model_arr.append(UpperCAmelCase_ )
            lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(UpperCAmelCase_ )
        self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
        lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
        lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
        lowerCAmelCase : Tuple = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
        lowerCAmelCase : Union[str, Any] = Text('Disk' , font_size=24 )
        lowerCAmelCase : Optional[Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
        disk.move_to([-4, -1.25, 0] )
        self.add(UpperCAmelCase_ , UpperCAmelCase_ )
        # --- legend ---------------------------------------------------------
        lowerCAmelCase : List[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase : Optional[int] = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(UpperCAmelCase_ , UpperCAmelCase_ )
        lowerCAmelCase : Dict = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
        blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(UpperCAmelCase_ )
        # --- the input square enters the first model block ------------------
        lowerCAmelCase : str = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCAmelCase_ ) )
        lowerCAmelCase : Optional[Any] = Square(0.3 )
        input.set_fill(UpperCAmelCase_ , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 )
        self.play(Write(UpperCAmelCase_ ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 )
        self.play(MoveToTarget(UpperCAmelCase_ ) )
        self.play(FadeOut(UpperCAmelCase_ ) )
        lowerCAmelCase : List[Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 )
        a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        lowerCAmelCase : int = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCAmelCase_ , run_time=3 ) )
        lowerCAmelCase : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
        self.play(
            Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        # --- walk the input across all six blocks, swapping weights ---------
        lowerCAmelCase : Any = a.copy()
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            lowerCAmelCase : int = AnimationGroup(
                FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(UpperCAmelCase_ )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    lowerCAmelCase : List[str] = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # last block: weights return to the CPU column permanently
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
            lowerCAmelCase : int = a_c
            lowerCAmelCase : Union[str, Any] = a_c.copy()
        # --- wrap up ---------------------------------------------------------
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , )
        lowerCAmelCase : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) )
        self.wait()
| 323 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def A_ ( _lowerCAmelCase ) -> int:  # picklable for multiprocessing
    """Return its argument incremented by one.

    The original body returned the undefined name ``i`` (NameError); it now
    uses the actual parameter.
    """
    return _lowerCAmelCase + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def A_ ( ) -> int:
    """Check that ``parallel_backend('spark')`` registers the backend name and
    that an unsupported backend name raises from inside ``map_nested``.

    NOTE(review): ``_lowerCAmelCase`` is referenced below but never defined in
    this function — the identifiers look machine-mangled (upstream this is
    presumably ``pytest.raises(ValueError)`` and ``map_nested(add_one, lst,
    ...)``); confirm against the original test before relying on it.
    """
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    UpperCamelCase : int = [1, 2, 3]
    with pytest.raises(_lowerCAmelCase ):
        with parallel_backend("unsupported backend" ):
            map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=2 )
    with pytest.raises(_lowerCAmelCase ):
        with parallel_backend("unsupported backend" ):
            map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def A_ ( _lowerCAmelCase ) -> List[Any]:
    """Check ``map_nested`` over lists, dicts and nested dicts under the
    'spark' backend: each expected structure is the input with every leaf
    incremented by one.

    NOTE(review): the mapped function and the data arguments all appear as
    ``_lowerCAmelCase`` — machine-mangled; upstream this maps an ``add_one``
    helper over the five fixtures below. Verify before running.
    """
    # input fixtures
    UpperCamelCase : Any = [1, 2]
    UpperCamelCase : Dict = {"a": 1, "b": 2}
    UpperCamelCase : Union[str, Any] = {"a": [1, 2], "b": [3, 4]}
    UpperCamelCase : List[Any] = {"a": {"1": 1}, "b": 2}
    UpperCamelCase : Dict = {"a": 1, "b": 2, "c": 3, "d": 4}
    # expected outputs: every leaf value + 1
    UpperCamelCase : Any = [2, 3]
    UpperCamelCase : Any = {"a": 2, "b": 3}
    UpperCamelCase : Any = {"a": [2, 3], "b": [4, 5]}
    UpperCamelCase : Optional[int] = {"a": {"1": 2}, "b": 3}
    UpperCamelCase : str = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa
        assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa
        assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa
        assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa
        assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa
| 52 |
def solution(limit: int = 28123) -> int:
    """Project Euler problem 23: sum of all positive integers up to ``limit``
    that cannot be written as the sum of two abundant numbers.

    A number is abundant when the sum of its proper divisors exceeds it.

    :param limit: inclusive upper bound to scan (28123 is the proven bound
        above which every integer is a sum of two abundants).
    :return: the sum of the non-expressible integers.
    """
    # Sieve of proper-divisor sums: every n starts with divisor 1; each
    # factor pair (i, k) with i <= sqrt(n) contributes i + k (i alone for squares).
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        # n is expressible iff n - a is abundant for some abundant a <= n.
        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
| 257 | 0 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Plain-recursion 0/1 knapsack.

    At each ``index`` either skip the item or, if it fits, take it and recurse
    with the reduced capacity; return the better of the two.

    :param weights: item weights.
    :param values: item values (parallel to ``weights``).
    :param number_of_items: total item count (recursion stop).
    :param max_weight: remaining capacity.
    :param index: current item being considered.
    :return: the maximum achievable value.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take it, if it fits in the remaining capacity.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 141 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    """Map one original EfficientFormer state-dict key to its HF equivalent.

    :param old_name: key from the original checkpoint.
    :param num_meta4D_last_stage: number of meta-4D blocks in the last stage,
        used to split that stage's blocks into meta4D vs meta3D layers.
    :return: the renamed key.
    """
    new_name = old_name

    if "patch_embed" in old_name:
        # patch_embed keys look like "patch_embed.<layer>.<param>".
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        # Block index may be one or two digits ("3.10." vs "3.1.").
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                # Remaining blocks of the last stage are the meta-3D layers.
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of ``checkpoint`` in place via :func:`rename_key`.

    :param checkpoint: original state dict (mutated and returned).
    :param num_meta4D_last_stage: forwarded to :func:`rename_key`.
    :return: the same dict with HF-format keys.
    """
    # Iterate over a copy of the keys because we pop/insert while looping.
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    """Download the standard COCO test image used to sanity-check the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    """Convert an original EfficientFormer checkpoint to the HF format, verify
    its logits against reference values, save it, and optionally push to the hub.

    :param checkpoint_path: path to the original ``.pth`` checkpoint.
    :param efficientformer_config_file: JSON config for the model.
    :param pytorch_dump_path: output directory for the converted model.
    :param push_to_hub: whether to upload model + processor afterwards.
    :raises ValueError: if the checkpoint name is not an l1/l3/l7 variant.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    # NOTE(review): attribute name was mangled in the source ("num_metaad_blocks");
    # restored to `num_meta3d_blocks` — confirm against EfficientFormerConfig.
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline — must match the HF processor exactly
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"""
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(f"""Processor successfuly saved at {pytorch_dump_path}""")

    if push_to_hub:
        print("""Pushing model to the hub...""")

        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""",
            commit_message="""Add model""",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""",
            commit_message="""Add image processor""",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint/config/output paths and convert.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--pytorch_model_path',
        default=None,
        type=str,
        required=True,
        help='Path to EfficientFormer pytorch checkpoint.',
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The json file for EfficientFormer model config.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )

    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    # Hub push is enabled by default; provide an explicit opt-out flag.
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 141 | 1 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle with ``num_rows`` rows, centred with spaces."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle row by row.

    :param num_rows: number of rows to generate.
    :return: list of rows; row ``i`` has ``i + 1`` entries.
    :raises TypeError: if ``num_rows`` is not an int.
    :raises ValueError: if ``num_rows`` is negative.

    >>> generate_pascal_triangle(3)
    [[1], [1, 1], [1, 2, 1]]
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Compute row ``current_row_idx`` from the rows already in ``triangle``."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Fill one interior cell: the sum of the two cells above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle computing only the distinct half of each row,
    then mirroring it (rows are palindromic).

    Same contract as :func:`generate_pascal_triangle`.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """Time both generators for row counts 0..14 via ``timeit``."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 16 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """Builds tiny SwinV2 configs/inputs and runs shape checks for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors sized from the tester."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,  # NOTE(review): "path_norm" kept from original — looks like a typo for "patch_norm"; confirm against SwinvaConfig
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # Sequence length shrinks 4x per merge step; hidden width doubles per stage.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline test-suite for the SwinV2 classes."""

    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    # NOTE(review): base-class attribute name restored from mangled source — confirm
    # against the PipelineTesterMixin in this repo revision.
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """Run one model class and verify hidden/reshaped hidden state shapes."""
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        # Image size 32 is not divisible by patch size 3 — the model must pad.
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released swinv2-tiny checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 16 | 1 |
"""simple docstring"""
import os
def solution() -> int:
    """Project Euler problem 11: greatest product of four adjacent numbers
    (horizontally, vertically, or diagonally) in the 20x20 grid stored next
    to this file in ``grid.txt``.

    :return: the maximum four-in-a-line product.
    """
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left); j starts at 3 so j - 3 stays in range
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
| 369 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    """Fast CPU checks for the KarrasVe pipeline with a tiny dummy UNet."""

    @property
    def dummy_uncond_unet(self):
        # Seed before construction so the dummy weights are deterministic.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the released ncsnpp-celebahq-256 checkpoint."""

    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 112 | 0 |
from __future__ import annotations
import math
def lowerCAmelCase_ ( __A ) -> list[int]:
    """Return all prime numbers <= ``__A`` using the Sieve of Eratosthenes.

    Args:
        __A: upper bound (inclusive); must be a positive integer.

    Returns:
        Ascending list of primes <= ``__A``.

    Raises:
        ValueError: if ``__A`` <= 0.

    BUG FIX: the body referenced an undefined name ``num`` and appended the
    mangled argument ``__A`` instead of the current candidate; the sieve's
    marking step was likewise mangled. Logic restored to the standard sieve.
    """
    if __A <= 0:
        msg = f"{__A}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (__A + 1)
    prime = []
    start = 2
    end = int(math.sqrt(__A))
    while start <= end:
        # If start is still marked, it is prime.
        if sieve[start]:
            prime.append(start)
            # Mark every multiple of start (from start*start upward) composite.
            for i in range(start * start, __A + 1, start):
                sieve[i] = False
        start += 1
    # Everything above sqrt(__A) still marked True is prime.
    for j in range(end + 1, __A + 1):
        if sieve[j]:
            prime.append(j)
    return prime
if __name__ == "__main__":
    # BUG FIX: the guard called undefined `prime_sieve`; the sieve defined
    # above is (mangled-)named `lowerCAmelCase_`.
    print(lowerCAmelCase_(int(input('Enter a positive integer: ').strip())))
import math
def lowerCAmelCase_ ( __A ) -> bool:
    """Return True iff ``__A`` is a perfect square.

    Raises:
        ValueError: if ``__A`` is negative (propagated from ``math.isqrt``,
        matching the original ``math.sqrt`` behavior).

    BUG FIX: the body compared against an undefined name ``num``. Also uses
    integer ``math.isqrt`` instead of float ``sqrt*sqrt == x``, which is
    unreliable for large integers.
    """
    return math.isqrt(__A) ** 2 == __A
def lowerCAmelCase_ ( __A ) -> bool:
    """Return True iff ``__A`` is a perfect square, via binary search on the root.

    Searches mid in [0, __A] for mid**2 == __A in O(log __A) steps.

    BUG FIX: the original collapsed ``left``, ``right`` and ``mid`` into one
    mangled name and read an undefined ``n``; the standard binary search is
    restored with the public interface unchanged.
    """
    left = 0
    right = __A
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == __A:
            return True
        elif mid**2 > __A:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
# ---- end of embedded source file (dataset row-separator artifact) ----
from __future__ import annotations
# Type aliases for the Enigma emulator.
# NOTE(review): every constant below is assigned to the same mangled name
# `_lowerCamelCase`, so each assignment overwrites the previous one, and the
# functions below read `abc`, `rotora`, `reflector`, which are never defined.
# The originals were presumably distinct names (RotorPositionT, RotorSelectionT,
# abc, rotor1..rotor9, reflector) — confirm against the upstream source.
_lowerCamelCase : Union[str, Any] = tuple[int, int, int]
_lowerCamelCase : Any = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_lowerCamelCase : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
# -------------------------- default selection --------------------------
# rotors --------------------------
_lowerCamelCase : List[str] = '''EGZWVONAHDCLFQMSIPJBYUKXTR'''
_lowerCamelCase : Dict = '''FOBHMDKEXQNRAULPGSJVTYICZW'''
_lowerCamelCase : Any = '''ZJXESIUQLHAVRMDOYGTNFWPBKC'''
# reflector --------------------------
# Symmetric letter-pair mapping: A<->N, B<->O, ... (each key maps to its partner).
_lowerCamelCase : List[str] = {
    '''A''': '''N''',
    '''N''': '''A''',
    '''B''': '''O''',
    '''O''': '''B''',
    '''C''': '''P''',
    '''P''': '''C''',
    '''D''': '''Q''',
    '''Q''': '''D''',
    '''E''': '''R''',
    '''R''': '''E''',
    '''F''': '''S''',
    '''S''': '''F''',
    '''G''': '''T''',
    '''T''': '''G''',
    '''H''': '''U''',
    '''U''': '''H''',
    '''I''': '''V''',
    '''V''': '''I''',
    '''J''': '''W''',
    '''W''': '''J''',
    '''K''': '''X''',
    '''X''': '''K''',
    '''L''': '''Y''',
    '''Y''': '''L''',
    '''M''': '''Z''',
    '''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
_lowerCamelCase : int = '''RMDJXFUWGISLHVTCQNKYPBEZOA'''
_lowerCamelCase : List[Any] = '''SGLCPQWZHKXAREONTFBVIYJUDM'''
_lowerCamelCase : List[str] = '''HVSICLTYKQUBXDWAJZOMFGPREN'''
_lowerCamelCase : Union[str, Any] = '''RZWQHFMVDBKICJLNTUXAGYPSOE'''
_lowerCamelCase : int = '''LFKIJODBEGAMQPXVUHYSTCZRWN'''
_lowerCamelCase : Any = '''KOAEGVDHXPQZMLFTYWJNBRCIUS'''
def _a ( SCREAMING_SNAKE_CASE__ : RotorPositionT , SCREAMING_SNAKE_CASE__ : RotorSelectionT , SCREAMING_SNAKE_CASE__ : str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate rotor positions, rotor selection and the plugboard string.

    Intended contract: raise on fewer than 3 unique rotors or a rotor position
    outside 1..26, then return (positions, selection, plugboard-dict).

    NOTE(review): heavily mangled — the three parameters share one name
    (duplicate arguments are a SyntaxError in Python), and the body reads
    `rotpos`, `rotorposa` (three distinct positions collapsed into one name)
    and calls `_plugboard`, none of which are defined here. Presumably the
    original signature was (rotpos, rotsel, pb). Left byte-identical pending
    reconstruction against the upstream source.
    """
    # Checks there are at least 3 unique rotors selected.
    if (unique_rotsel := len(set(SCREAMING_SNAKE_CASE__ ) )) < 3:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = f'''Please use 3 unique rotors (not {unique_rotsel})'''
        raise Exception(SCREAMING_SNAKE_CASE__ )
    # Checks if rotor positions are valid
    SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[int] = rotpos
    if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE__ ):
        SCREAMING_SNAKE_CASE__ : Dict = f'''First rotor position is not within range of 1..26 ({rotorposa}'''
        raise ValueError(SCREAMING_SNAKE_CASE__ )
    if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE__ ):
        SCREAMING_SNAKE_CASE__ : Optional[Any] = f'''Second rotor position is not within range of 1..26 ({rotorposa})'''
        raise ValueError(SCREAMING_SNAKE_CASE__ )
    if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE__ ):
        SCREAMING_SNAKE_CASE__ : int = f'''Third rotor position is not within range of 1..26 ({rotorposa})'''
        raise ValueError(SCREAMING_SNAKE_CASE__ )
    # Validates string and returns dict
    SCREAMING_SNAKE_CASE__ : Dict = _plugboard(SCREAMING_SNAKE_CASE__ )
    return rotpos, rotsel, pbdict
def _a ( SCREAMING_SNAKE_CASE__ : str ) -> dict[str, str]:
    """Validate an Enigma plugboard setting and build its symmetric swap map.

    Args:
        SCREAMING_SNAKE_CASE__: plugboard string, e.g. ``"PO LI UJ"`` — pairs
            of distinct symbols from the module-level alphabet ``abc``,
            optionally separated by spaces.

    Returns:
        Dict mapping each paired symbol to its partner (both directions);
        empty dict for the empty string.

    Raises:
        TypeError: if the setting is not a string.
        Exception: odd number of symbols, a symbol outside ``abc``, or a
            duplicate symbol.

    BUG FIXES: the original discarded the result of ``str.replace`` (strings
    are immutable), read undefined locals (``pbstring``, ``tmppbl``, ``pb``)
    and had the second ``isinstance`` argument mangled away.
    NOTE(review): depends on the module-level alphabet ``abc``, whose name is
    mangled to ``_lowerCamelCase`` above — confirm wiring before running.
    """
    pbstring = SCREAMING_SNAKE_CASE__
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    if len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    if pbstring == "":
        return {}
    # Strip separators; keep only the symbols themselves.
    pbstring = pbstring.replace(" ", "")
    # Checks that all characters are valid and unique.
    tmppbl = set()
    for symbol in pbstring:
        if symbol not in abc:
            raise Exception(f"'{symbol}' not in list of symbols")
        if symbol in tmppbl:
            raise Exception(f"Duplicate symbol ({symbol})")
        tmppbl.add(symbol)
    del tmppbl
    # Create the symmetric pair mapping.
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : RotorPositionT , SCREAMING_SNAKE_CASE__ : RotorSelectionT = (rotora, rotora, rotora) , SCREAMING_SNAKE_CASE__ : str = "" , ) -> str:
    """Encipher/decipher a message with the emulated Enigma machine
    (plugboard -> three rotors -> reflector -> rotors back -> plugboard,
    stepping rotor positions after each symbol).

    NOTE(review): severely mangled — the parameters share one name (a
    SyntaxError), all three rotors were collapsed to `rotora`, all three
    positions to `rotorposa`, and the body reads `text`, `plugb`, `plugboard`,
    `abc`, `reflector`, `index`, `result`, none of which are bound here.
    Left byte-identical pending reconstruction against the upstream source.
    """
    SCREAMING_SNAKE_CASE__ : List[str] = text.upper()
    SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Any = _validator(
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , plugb.upper() )
    SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = rotor_position
    SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Tuple = rotor_selection
    # Public rotor positions are 1-based; shift to 0-based indices.
    rotorposa -= 1
    rotorposa -= 1
    rotorposa -= 1
    SCREAMING_SNAKE_CASE__ : Optional[int] = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                SCREAMING_SNAKE_CASE__ : Any = plugboard[symbol]
            # rotor ra --------------------------
            SCREAMING_SNAKE_CASE__ : List[Any] = abc.index(SCREAMING_SNAKE_CASE__ ) + rotorposa
            SCREAMING_SNAKE_CASE__ : str = rotora[index % len(SCREAMING_SNAKE_CASE__ )]
            # rotor rb --------------------------
            SCREAMING_SNAKE_CASE__ : Any = abc.index(SCREAMING_SNAKE_CASE__ ) + rotorposa
            SCREAMING_SNAKE_CASE__ : Optional[Any] = rotora[index % len(SCREAMING_SNAKE_CASE__ )]
            # rotor rc --------------------------
            SCREAMING_SNAKE_CASE__ : Tuple = abc.index(SCREAMING_SNAKE_CASE__ ) + rotorposa
            SCREAMING_SNAKE_CASE__ : List[Any] = rotora[index % len(SCREAMING_SNAKE_CASE__ )]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            SCREAMING_SNAKE_CASE__ : Dict = reflector[symbol]
            # 2nd rotors
            SCREAMING_SNAKE_CASE__ : int = abc[rotora.index(SCREAMING_SNAKE_CASE__ ) - rotorposa]
            SCREAMING_SNAKE_CASE__ : int = abc[rotora.index(SCREAMING_SNAKE_CASE__ ) - rotorposa]
            SCREAMING_SNAKE_CASE__ : Any = abc[rotora.index(SCREAMING_SNAKE_CASE__ ) - rotorposa]
            # 2nd plugboard
            if symbol in plugboard:
                SCREAMING_SNAKE_CASE__ : Union[str, Any] = plugboard[symbol]
            # moves/resets rotor positions
            rotorposa += 1
            if rotorposa >= len(SCREAMING_SNAKE_CASE__ ):
                SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
            rotorposa += 1
            if rotorposa >= len(SCREAMING_SNAKE_CASE__ ):
                SCREAMING_SNAKE_CASE__ : int = 0
            rotorposa += 1
            if rotorposa >= len(SCREAMING_SNAKE_CASE__ ):
                SCREAMING_SNAKE_CASE__ : Dict = 0
        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #       'Invalid symbol('+repr(symbol)+')')
        result.append(SCREAMING_SNAKE_CASE__ )
    return "".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    # Demo: encrypt a message, then decrypt it with the same settings
    # (Enigma is self-inverse for a fixed configuration).
    # NOTE(review): every binding below targets the mangled `_lowerCamelCase`,
    # yet the calls read `message`, `rotor_pos`, `rotor_sel`, `pb`, `rotora`,
    # `enigma`, `en` — all unbound. Confirm against the upstream source.
    _lowerCamelCase : str = '''This is my Python script that emulates the Enigma machine from WWII.'''
    _lowerCamelCase : Any = (1, 1, 1)
    _lowerCamelCase : Optional[int] = '''pictures'''
    _lowerCamelCase : Dict = (rotora, rotora, rotora)
    _lowerCamelCase : Optional[Any] = enigma(message, rotor_pos, rotor_sel, pb)
    print('''Encrypted message:''', en)
    print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
# ---- end of embedded source file (dataset row-separator artifact) ----
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _a ( SCREAMING_SNAKE_CASE__ : str ) -> None:
    """Print the first-order entropy, second-order entropy and their difference
    for ``SCREAMING_SNAKE_CASE__``, each rounded to one decimal place.

    H1 = -sum_c p(c) * log2 p(c) over single characters drawn from
    space + a..z; H2 is the analogous sum over character pairs.

    BUG FIXES: ``math.loga`` restored to ``math.log2`` (digit-mangled name);
    the two nested loop variables were both named ``cha`` so the inner loop
    shadowed the outer and only doubled characters were counted; unbound
    locals restored.
    NOTE(review): ``analyze_text`` is the sibling helper below, whose name was
    also mangled to ``_a`` — confirm wiring before running.
    """
    text = SCREAMING_SNAKE_CASE__
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def _a ( SCREAMING_SNAKE_CASE__ : str ) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs of the text.

    Args:
        SCREAMING_SNAKE_CASE__: non-empty input text.

    Returns:
        ``(single_char_strings, two_char_strings)`` Counters. The last
        character is counted once up front (the loop below stops one short),
        and a leading ``" " + text[0]`` pair accounts for the start boundary.

    BUG FIX: the original bound both Counters and every update target to one
    mangled name, leaving ``single_char_strings``/``two_char_strings`` unbound;
    the straightforward counting logic is restored.
    """
    text = SCREAMING_SNAKE_CASE__
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def _a ( ) -> str:
    """Module entry point: run the doctests.

    NOTE(review): annotated ``-> str`` but returns None; the sample text below
    is intentionally left commented out, as in the original.
    """
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)
if __name__ == "__main__":
    # BUG FIX: the guard called undefined `main`; the entry point defined just
    # above is (mangled-)named `_a`.
    _a()
# ---- end of embedded source file (dataset row-separator artifact) ----
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase__ = logging.getLogger()


def _a():
    """Parse the `-f` command-line flag (pytest's output-file flag) and return
    its value, so this test module tolerates being launched under pytest."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    return parser.parse_args().f
class a__ ( snake_case__ ):
    """End-to-end tests for the DeeBERT example scripts (two-stage training,
    per-highway evaluation, and entropy-based early exit).

    NOTE(review): the base class name `snake_case__` is mangled (presumably
    `TestCasePlus`), and the helper below is defined as `__SCREAMING_SNAKE_CASE`
    yet invoked as `self.run_and_check(...)` — confirm against the upstream
    transformers test before running.
    """

    def __SCREAMING_SNAKE_CASE( self ):
        """Mirror the script's log output to stdout for test visibility."""
        __lowerCAmelCase = logging.StreamHandler(sys.stdout )
        logger.addHandler(_A )

    def __SCREAMING_SNAKE_CASE( self , _A ):
        """Run run_glue_deebert.main() with the given argv and require every
        reported metric to be at least 0.666."""
        __lowerCAmelCase = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , "run_glue_deebert.py" )
            with patch.object(_A , "argv" , _A ):
                __lowerCAmelCase = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(_A , 0.6_66 )

    @slow
    @require_torch_non_multi_gpu
    def __SCREAMING_SNAKE_CASE( self ):
        """Train the two-stage DeeBERT model on the MRPC fixture, then evaluate
        each highway and the entropy-0.1 early-exit configuration."""
        __lowerCAmelCase = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(_A )
        __lowerCAmelCase = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(_A )
        __lowerCAmelCase = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(_A )
# ---- end of embedded source file (dataset row-separator artifact) ----
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def a__ ( __SCREAMING_SNAKE_CASE ):
    """Fixture: clear the `datasets` deprecation-warning dedup set so each test
    sees deprecation warnings afresh.

    BUG FIX: the body referenced an undefined name `monkeypatch`; it now uses
    the fixture parameter. The original `-> Union[str, Any]` annotation
    referenced `Union`, which is not imported in this module, and was removed.
    NOTE(review): pytest injects fixtures by parameter name, so this parameter
    was presumably named `monkeypatch` originally — confirm before relying on
    injection.
    """
    __SCREAMING_SNAKE_CASE.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def a__ ( __SCREAMING_SNAKE_CASE ) -> str:
    """Fixture: patch `datasets.inspect.huggingface_hub` with a mock hub that
    lists a few fake metric ids.

    NOTE(review): mangled — both helper classes are named `snake_case` (the
    second shadows the first), the list builds `MetricMock(__snake_case)` and
    the patch instantiates `HfhMock()`, none of which are defined, and the
    `__init__` body reads an unbound `metric_id`. Left byte-identical pending
    reconstruction against the upstream datasets test.
    """
    class snake_case :
        def __init__( self : int , UpperCamelCase__ : Optional[int])-> Union[str, Any]:
            """Store the fake metric's id."""
            __lowerCAmelCase: str = metric_id

    class snake_case :
        # Fake hub listing: four mock metrics.
        SCREAMING_SNAKE_CASE_ : List[Any] = [MetricMock(__snake_case ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]

        def lowercase_ ( self : Tuple)-> Union[str, Any]:
            """Return the mock metric list."""
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
    """Each deprecated metrics API entry point should emit the evaluate-library
    deprecation warning when called.

    NOTE(review): mangled — all five parameters share one name (a SyntaxError)
    and the body reads `args`, `tmp_path`, `func`; presumably the original
    signature was (func, args, mock_emitted_..., mock_hfh, tmp_path). Left
    byte-identical pending reconstruction.
    """
    if "tmp_path" in args:
        # Substitute the real tmp_path fixture value for the placeholder.
        __lowerCAmelCase: Tuple = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(__SCREAMING_SNAKE_CASE , match="https://huggingface.co/docs/evaluate" ):
        func(*__SCREAMING_SNAKE_CASE )
# ---- end of embedded source file (dataset row-separator artifact) ----
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# (Layout models take image + words + boxes, so they are excluded from the
# generic pipeline-test mappings below.)
_lowerCamelCase : Tuple = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class lowercase ( unittest.TestCase ):
    """Pipeline tests for zero-shot text classification (PT and TF).

    NOTE(review): identifiers are mangled throughout — all class attributes
    are assigned to `lowercase__` (each overwriting the last) while the class
    body reads `model_mapping` / `tf_model_mapping`, and method-local results
    are assigned to `SCREAMING_SNAKE_CASE` while later lines read `classifier`,
    `outputs`, `zero_shot_classifier`, etc. Reconstruct against the upstream
    transformers test before running.
    """

    lowercase__ : Any = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    lowercase__ : Optional[int] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        lowercase__ : Union[str, Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        lowercase__ : Any = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def __snake_case( self : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : str ) -> Optional[Any]:
        """Build a small zero-shot pipeline plus two example inputs for the
        generic pipeline-test harness."""
        SCREAMING_SNAKE_CASE = ZeroShotClassificationPipeline(
            model=_UpperCamelCase , tokenizer=_UpperCamelCase , candidate_labels=["polics", "health"] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def __snake_case( self : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> List[str]:
        """Exercise the pipeline's output schema across the supported
        candidate-label formats and the documented error cases."""
        SCREAMING_SNAKE_CASE = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
        self.assertEqual(_UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase )]} )
        # No kwarg
        SCREAMING_SNAKE_CASE = classifier("Who are you voting for in 2020?" , ["politics"] )
        self.assertEqual(_UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase )]} )
        SCREAMING_SNAKE_CASE = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
        self.assertEqual(_UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase )]} )
        SCREAMING_SNAKE_CASE = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
        self.assertEqual(
            _UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]} )
        # Scores over candidate labels must sum to 1 (softmax over labels).
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        SCREAMING_SNAKE_CASE = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
        self.assertEqual(
            _UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        SCREAMING_SNAKE_CASE = classifier(
            "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
        self.assertEqual(_UpperCamelCase , {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase )]} )
        # https://github.com/huggingface/transformers/issues/13846
        SCREAMING_SNAKE_CASE = classifier(["I am happy"] , ["positive", "negative"] )
        self.assertEqual(
            _UpperCamelCase , [
                {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]}
                for i in range(1 )
            ] , )
        SCREAMING_SNAKE_CASE = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
        self.assertEqual(
            _UpperCamelCase , [
                {"sequence": ANY(_UpperCamelCase ), "labels": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], "scores": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]}
                for i in range(2 )
            ] , )
        # Invalid inputs must raise: empty sequence, non-string sequence,
        # empty/None labels, a hypothesis template without "{}", None template.
        with self.assertRaises(_UpperCamelCase ):
            classifier("" , candidate_labels="politics" )
        with self.assertRaises(_UpperCamelCase ):
            classifier(_UpperCamelCase , candidate_labels="politics" )
        with self.assertRaises(_UpperCamelCase ):
            classifier("Who are you voting for in 2020?" , candidate_labels="" )
        with self.assertRaises(_UpperCamelCase ):
            classifier("Who are you voting for in 2020?" , candidate_labels=_UpperCamelCase )
        with self.assertRaises(_UpperCamelCase ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
        with self.assertRaises(_UpperCamelCase ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=_UpperCamelCase , )
        self.run_entailment_id(_UpperCamelCase )

    def __snake_case( self : List[str] , _UpperCamelCase : Pipeline ) -> Optional[int]:
        """Check entailment_id resolution for several label2id layouts, then
        restore the model's original mapping."""
        SCREAMING_SNAKE_CASE = zero_shot_classifier.model.config
        SCREAMING_SNAKE_CASE = config.labelaid
        SCREAMING_SNAKE_CASE = zero_shot_classifier.entailment_id
        # Generic LABEL_n names: no entailment label -> sentinel -1.
        SCREAMING_SNAKE_CASE = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        SCREAMING_SNAKE_CASE = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        SCREAMING_SNAKE_CASE = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        SCREAMING_SNAKE_CASE = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        SCREAMING_SNAKE_CASE = original_labelaid
        self.assertEqual(_UpperCamelCase , zero_shot_classifier.entailment_id )

    @require_torch
    def __snake_case( self : Optional[Any] ) -> Any:
        """Regression test for very long inputs (truncation must not crash)."""
        SCREAMING_SNAKE_CASE = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )

    @require_torch
    def __snake_case( self : Tuple ) -> str:
        """Tiny-model smoke test (PyTorch backend)."""
        SCREAMING_SNAKE_CASE = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        SCREAMING_SNAKE_CASE = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    @require_tf
    def __snake_case( self : str ) -> Any:
        """Tiny-model smoke test (TensorFlow backend)."""
        SCREAMING_SNAKE_CASE = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
        SCREAMING_SNAKE_CASE = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    @slow
    @require_torch
    def __snake_case( self : Tuple ) -> str:
        """Full roberta-large-mnli integration test (PyTorch backend),
        including the multi_label path on a long abstract."""
        SCREAMING_SNAKE_CASE = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
        SCREAMING_SNAKE_CASE = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )
        SCREAMING_SNAKE_CASE = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_UpperCamelCase , )
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) , {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )

    @slow
    @require_tf
    def __snake_case( self : Optional[int] ) -> Dict:
        """Full roberta-large-mnli integration test (TensorFlow backend),
        mirroring the PyTorch slow test above."""
        SCREAMING_SNAKE_CASE = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
        SCREAMING_SNAKE_CASE = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )
        SCREAMING_SNAKE_CASE = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_UpperCamelCase , )
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) , {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )
def __lowerCamelCase(a: int, b: int) -> int:
    """Return gcd(a, b) via the iterative Euclidean algorithm.

    BUG FIXES: a dataset-export artifact ("| 206 | ") was fused onto the def
    line, and both parameters shared one mangled name (a SyntaxError) while
    the body read `a` and `b`; the parameters are restored as (a, b).
    """
    while b:
        a, b = b, a % b
    return a
def __lowerCamelCase(a: int, b: int) -> int:
    """Return gcd(a, b) via the recursive Euclidean algorithm.

    BUG FIXES: both parameters shared one mangled name (a SyntaxError) and the
    recursive call targeted undefined `euclidean_gcd_recursive` with a mangled
    first argument; it now recurses on itself as gcd(b, a % b).
    """
    return a if b == 0 else __lowerCamelCase(b, a % b)
def __lowerCamelCase ():
    """Demo entry point: print gcd results for a few sample pairs.

    NOTE(review): `euclidean_gcd` and `euclidean_gcd_recursive` are not
    defined in this file — both implementations above were mangled to
    `__lowerCamelCase` — so every print below raises NameError at runtime.
    Confirm the intended targets against the upstream source.
    """
    print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
    print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
    print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
    print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
    print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
    print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
    print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
    print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
    print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
    print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file — the demo entry point
    # above was mangled to `__lowerCamelCase`; confirm the intended target.
    main()
# ---- end of embedded source file (dataset row-separator artifact) ----
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : Any ):
    """Per-test setup hook.

    NOTE(review): assigns a throwaway local (`snake_case_ = 0`) — presumably
    this originally initialized instance state in `setUp`; confirm against
    the upstream transformers test.
    """
    snake_case_ = 0
@slow
def snake_case__ ( self : Dict ):
    """Load AutoTokenizer for every BERT (non-Japanese) and GPT-2 checkpoint
    in the archive maps and sanity-check type and non-empty vocab.

    NOTE(review): loop results are assigned to `snake_case_` but assertions
    read the undefined `__lowercase` (and `model_name` goes unused) —
    mangled; confirm against the upstream test before running.
    """
    for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
        snake_case_ = AutoTokenizer.from_pretrained(__lowercase )
        self.assertIsNotNone(__lowercase )
        self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) )
        self.assertGreater(len(__lowercase ) , 0 )
    for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
        snake_case_ = AutoTokenizer.from_pretrained(__lowercase )
        self.assertIsNotNone(__lowercase )
        self.assertIsInstance(__lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
        self.assertGreater(len(__lowercase ) , 0 )
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
# Check that tokenizer_type ≠ model_type
snake_case_ = AutoTokenizer.from_pretrained(__lowercase , config=__lowercase )
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def snake_case__ ( self : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(__lowercase , "vocab.txt" ) )
snake_case_ = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="bert" , use_fast=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(__lowercase , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(__lowercase , "merges.txt" ) )
snake_case_ = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="gpt2" , use_fast=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@require_tokenizers
def snake_case__ ( self : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(__lowercase , "vocab.txt" ) )
snake_case_ = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="bert" )
self.assertIsInstance(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(__lowercase , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(__lowercase , "merges.txt" ) )
snake_case_ = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="gpt2" )
self.assertIsInstance(__lowercase , __lowercase )
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
with pytest.raises(__lowercase ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
snake_case_ = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__lowercase , __lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , __lowercase )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def snake_case__ ( self : Any ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowercase , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
snake_case_ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = TOKENIZER_MAPPING.values()
snake_case_ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowercase )
@require_tokenizers
def snake_case__ ( self : Tuple ):
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=__lowercase ) , __lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , __lowercase )
@require_tokenizers
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=__lowercase )
snake_case_ = "Hello, world. How are you?"
snake_case_ = tokenizer.tokenize(__lowercase )
self.assertEqual("[UNK]" , tokens[0] )
snake_case_ = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=__lowercase )
snake_case_ = tokenizer.tokenize(__lowercase )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(__lowercase ) , __lowercase )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
snake_case_ = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowercase , __lowercase )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = get_tokenizer_config("bert-base-cased" )
snake_case_ = config.pop("_commit_hash" , __lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowercase , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
snake_case_ = get_tokenizer_config(__lowercase )
self.assertDictEqual(__lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
snake_case_ = AutoTokenizer.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
snake_case_ = get_tokenizer_config(__lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def snake_case__ ( self : int ):
"""simple docstring"""
try:
AutoConfig.register("custom" , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
snake_case_ = CustomTokenizer.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
snake_case_ = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
try:
AutoConfig.register("custom" , __lowercase )
# Can register in two steps
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowercase , slow_tokenizer_class=__lowercase , fast_tokenizer_class=__lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = BertTokenizerFast.from_pretrained(__lowercase )
bert_tokenizer.save_pretrained(__lowercase )
snake_case_ = CustomTokenizerFast.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
snake_case_ = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
snake_case_ = AutoTokenizer.from_pretrained(__lowercase , use_fast=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def snake_case__ ( self : str ):
"""simple docstring"""
with self.assertRaises(__lowercase ):
snake_case_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase ):
snake_case_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase )
snake_case_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
snake_case_ = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
snake_case_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase , use_fast=__lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase )
snake_case_ = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase , use_fast=__lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def snake_case__ ( self : Any ):
"""simple docstring"""
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = False
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = NewTokenizer
lowerCAmelCase_ = False
try:
AutoConfig.register("custom" , __lowercase )
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase )
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase )
# If remote code is not set, the default is to use local
snake_case_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
snake_case_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=__lowercase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
snake_case_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
snake_case_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase , use_fast=__lowercase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
snake_case_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
snake_case_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase , use_fast=__lowercase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=__lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
snake_case_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=__lowercase , use_fast=__lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
__lowercase , "bert-base is not a local folder and is not a valid model identifier" ):
snake_case_ = AutoTokenizer.from_pretrained("bert-base" )
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
__lowercase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
snake_case_ = AutoTokenizer.from_pretrained(__lowercase , revision="aaaaaa" )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
snake_case_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 187 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# NOTE(review): both NewTypes are bound to the same mangled name, so the
# second assignment clobbers the first; presumably these were `DataClass`
# and `DataClassType`, which the annotations below reference.
lowercase__ : str = NewType("DataClass", Any)
lowercase__ : Union[str, Any] = NewType("DataClassType", Any)
def lowerCamelCase__ ( _A ):
    """Parse a truthy/falsy string into a bool for argparse.

    Bools pass through unchanged; "yes"/"true"/"t"/"y"/"1" (case-insensitive)
    map to True, "no"/"false"/"f"/"n"/"0" to False; anything else raises
    ArgumentTypeError so argparse reports a clean usage error.

    Fixes the original, which called `isinstance(_A, _A)` (a TypeError for any
    string input) and returned the undefined name `v`.
    """
    if isinstance(_A, bool):
        # Already a bool (e.g. a dataclass default) -- nothing to parse.
        return _A
    if _A.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif _A.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {_A} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def lowerCamelCase__ ( _A ):
    """Build an argparse ``type=`` callable mapping the string form of each
    choice back to the original choice object (e.g. an Enum member).

    Unknown strings are returned unchanged so argparse's own ``choices``
    validation can reject them with a proper error message.

    Fixes the original, which keyed every dict entry on ``str(_A)`` (the whole
    choices object) and iterated the undefined name ``choices``.
    """
    str_to_choice = {str(choice): choice for choice in _A}
    return lambda arg: str_to_choice.get(arg, arg)
def lowerCamelCase__(
    *,
    aliases=None,
    help=None,  # noqa: A002 -- keyword name mirrors dataclasses.field-style APIs
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
):
    """``dataclasses.field`` wrapper that stores ``aliases`` and ``help`` in the
    field metadata, where the dataclass argument parser below reads them back
    (it pops the ``"aliases"`` metadata key when building argparse arguments).

    Fixes the original, whose keyword-only parameters all shared the name
    ``_A`` (a SyntaxError) and whose body assigned every value to the same
    throwaway local.
    """
    if metadata is None:
        # Important: don't use a dict default parameter -- it would be
        # mutable and shared across calls.
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class UpperCAmelCase ( UpperCAmelCase__ ):
    '''ArgumentParser subclass that builds its arguments from dataclass fields
    (an `HfArgumentParser`-style class) and parses back into dataclass
    instances.

    NOTE(review): the base class `UpperCAmelCase__` is undefined in this
    module (presumably `ArgumentParser`), many locals are mangled to
    `snake_case_`, and several call arguments reference the undefined
    `__lowercase` -- restore the intended names before running.
    '''
    # NOTE(review): garbled class attribute; presumably the
    # `dataclass_types: Iterable[DataClassType]` declaration.
    lowerCAmelCase_ = 42
    def __init__( self : Optional[Any] , __lowercase : Union[DataClassType, Iterable[DataClassType]] , **__lowercase : Optional[int] ):
        """Store the dataclass type(s) and register one argparse argument per field.

        NOTE(review): the positional and ``**`` parameters share the name
        ``__lowercase`` -- a SyntaxError as written; the body reads ``kwargs``
        and ``dataclass_types``, which these parameters were presumably named.
        """
        if "formatter_class" not in kwargs:
            # Keep defaults visible in --help unless the caller overrides.
            snake_case_ = ArgumentDefaultsHelpFormatter
        super().__init__(**__lowercase )
        if dataclasses.is_dataclass(__lowercase ):
            # Accept a single dataclass as well as an iterable of them.
            snake_case_ = [dataclass_types]
        snake_case_ = list(__lowercase )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(__lowercase )
    @staticmethod
    def snake_case__ ( __lowercase : ArgumentParser , __lowercase : dataclasses.Field ):
        """Translate one dataclass field into a ``parser.add_argument`` call.

        Handles Optional/Union unwrapping, Literal/Enum choices, bools (with a
        generated ``--no_<name>`` complement), lists, and plain scalars.
        NOTE(review): both parameters share the name ``__lowercase`` -- a
        SyntaxError as written (presumably ``parser`` and ``field``).
        """
        snake_case_ = f"--{field.name}"
        snake_case_ = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , __lowercase ):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default" )
        snake_case_ = kwargs.pop("aliases" , [] )
        if isinstance(__lowercase , __lowercase ):
            snake_case_ = [aliases]
        snake_case_ = getattr(field.type , "__origin__" , field.type )
        if origin_type is Union or (hasattr(__lowercase , "UnionType" ) and isinstance(__lowercase , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(__lowercase ) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'." )
            if type(__lowercase ) not in field.type.__args__:
                # filter `str` in Union
                snake_case_ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                snake_case_ = getattr(field.type , "__origin__" , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                snake_case_ = (
                    field.type.__args__[0] if isinstance(__lowercase , field.type.__args__[1] ) else field.type.__args__[1]
                )
                snake_case_ = getattr(field.type , "__origin__" , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        snake_case_ = {}
        if origin_type is Literal or (isinstance(field.type , __lowercase ) and issubclass(field.type , __lowercase )):
            if origin_type is Literal:
                snake_case_ = field.type.__args__
            else:
                snake_case_ = [x.value for x in field.type]
            snake_case_ = make_choice_type_function(kwargs["choices"] )
            if field.default is not dataclasses.MISSING:
                snake_case_ = field.default
            else:
                snake_case_ = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            snake_case_ = copy(__lowercase )
            # Hack because type=bool in argparse does not behave as we want.
            snake_case_ = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                snake_case_ = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                snake_case_ = default
                # This tells argparse we accept 0 or 1 value after --field_name
                snake_case_ = "?"
                # This is the value that will get picked if we do --field_name (without value)
                snake_case_ = True
        elif isclass(__lowercase ) and issubclass(__lowercase , __lowercase ):
            snake_case_ = field.type.__args__[0]
            snake_case_ = "+"
            if field.default_factory is not dataclasses.MISSING:
                snake_case_ = field.default_factory()
            elif field.default is dataclasses.MISSING:
                snake_case_ = True
        else:
            snake_case_ = field.type
            if field.default is not dataclasses.MISSING:
                snake_case_ = field.default
            elif field.default_factory is not dataclasses.MISSING:
                snake_case_ = field.default_factory()
            else:
                snake_case_ = True
        parser.add_argument(__lowercase , *__lowercase , **__lowercase )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            snake_case_ = False
            parser.add_argument(f"--no_{field.name}" , action="store_false" , dest=field.name , **__lowercase )
    def snake_case__ ( self : List[str] , __lowercase : DataClassType ):
        """Resolve a dataclass's type hints and register an argument per field.

        Raises a helpful RuntimeError when PEP 563/604 postponed annotations
        make hints unresolvable on the running Python version.
        """
        if hasattr(__lowercase , "_argument_group_name" ):
            # Group this dataclass's arguments under its declared group name.
            snake_case_ = self.add_argument_group(dtype._argument_group_name )
        else:
            snake_case_ = self
        try:
            snake_case_ = get_type_hints(__lowercase )
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)" )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__lowercase ):
                snake_case_ = ".".join(map(__lowercase , sys.version_info[:3] ) )
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`." ) from ex
            raise
        for field in dataclasses.fields(__lowercase ):
            if not field.init:
                continue
            snake_case_ = type_hints[field.name]
            self._parse_dataclass_field(__lowercase , __lowercase )
    def snake_case__ ( self : Union[str, Any] , __lowercase : Union[str, Any]=None , __lowercase : Union[str, Any]=False , __lowercase : List[str]=True , __lowercase : int=None , __lowercase : Optional[int]=None , ):
        """Parse CLI args (optionally merged with ``.args`` files) into one
        instance per registered dataclass; returns the instances, plus the
        leftover namespace and/or remaining strings when requested.

        NOTE(review): all five optional parameters share the name
        ``__lowercase`` -- a SyntaxError as written; the body reads
        ``args``, ``return_remaining_strings``, ``look_for_args_file``,
        ``args_filename`` and ``args_file_flag``.
        """
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            snake_case_ = []
            if args_filename:
                args_files.append(Path(__lowercase ) )
            elif look_for_args_file and len(sys.argv ):
                # Fall back to "<script name>.args" next to the script.
                args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                snake_case_ = ArgumentParser()
                args_file_parser.add_argument(__lowercase , type=__lowercase , action="append" )
                # Use only remaining args for further parsing (remove the args_file_flag)
                snake_case_ , snake_case_ = args_file_parser.parse_known_args(args=__lowercase )
                snake_case_ = vars(__lowercase ).get(args_file_flag.lstrip("-" ) , __lowercase )
                if cmd_args_file_paths:
                    args_files.extend([Path(__lowercase ) for p in cmd_args_file_paths] )
            snake_case_ = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            snake_case_ = file_args + args if args is not None else file_args + sys.argv[1:]
        snake_case_ , snake_case_ = self.parse_known_args(args=__lowercase )
        snake_case_ = []
        for dtype in self.dataclass_types:
            snake_case_ = {f.name for f in dataclasses.fields(__lowercase ) if f.init}
            snake_case_ = {k: v for k, v in vars(__lowercase ).items() if k in keys}
            for k in keys:
                # Remove consumed keys so leftovers can be detected below.
                delattr(__lowercase , __lowercase )
            snake_case_ = dtype(**__lowercase )
            outputs.append(__lowercase )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(__lowercase )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
            return (*outputs,)
    def snake_case__ ( self : int , __lowercase : Dict[str, Any] , __lowercase : bool = False ):
        """Instantiate the registered dataclasses from a plain dict; unused keys
        raise unless ``allow_extra_keys`` is set.

        NOTE(review): both parameters share the name ``__lowercase`` -- a
        SyntaxError as written (presumably ``args`` and ``allow_extra_keys``).
        """
        snake_case_ = set(args.keys() )
        snake_case_ = []
        for dtype in self.dataclass_types:
            snake_case_ = {f.name for f in dataclasses.fields(__lowercase ) if f.init}
            snake_case_ = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            snake_case_ = dtype(**__lowercase )
            outputs.append(__lowercase )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(__lowercase )}" )
        return tuple(__lowercase )
    def snake_case__ ( self : List[Any] , __lowercase : str , __lowercase : bool = False ):
        """Load a JSON file and delegate to ``parse_dict``.

        NOTE(review): both parameters share the name ``__lowercase`` -- a
        SyntaxError as written (presumably ``json_file`` and
        ``allow_extra_keys``).
        """
        with open(Path(__lowercase ) , encoding="utf-8" ) as open_json_file:
            snake_case_ = json.loads(open_json_file.read() )
        snake_case_ = self.parse_dict(__lowercase , allow_extra_keys=__lowercase )
        return tuple(__lowercase )
    def snake_case__ ( self : int , __lowercase : str , __lowercase : bool = False ):
        """Load a YAML file and delegate to ``parse_dict``.

        NOTE(review): both parameters share the name ``__lowercase`` -- a
        SyntaxError as written (presumably ``yaml_file`` and
        ``allow_extra_keys``).
        """
        snake_case_ = self.parse_dict(yaml.safe_load(Path(__lowercase ).read_text() ) , allow_extra_keys=__lowercase )
        return tuple(__lowercase )
| 187 | 1 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _UpperCAmelCase(config_name: str, save_dir: str, **config_kwargs):
    """Create a randomly initialized seq2seq model from ``config_name``, save
    it and its tokenizer under ``save_dir``, and return the model.

    Fixes the original, whose three parameters all shared the name
    ``UpperCamelCase_`` -- a SyntaxError.

    Args:
        config_name: model identifier or path understood by ``AutoConfig``.
        save_dir: directory to write the model and tokenizer to.
        **config_kwargs: extra overrides forwarded to
            ``AutoConfig.from_pretrained``.
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    # from_config (not from_pretrained) => weights are randomly initialized.
    model = AutoModelForSeqaSeqLM.from_config(config)
    model.save_pretrained(save_dir)
    # The tokenizer carries no random weights; just mirror it into save_dir.
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
    # NOTE(review): the CLI entry point above is bound to `_UpperCAmelCase`;
    # `save_randomly_initialized_version` does not exist in this module and
    # referencing it raised NameError. Expose the function actually defined.
    fire.Fire(_UpperCAmelCase)
| 159 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __snake_case (ctypes.Structure ):
    # ctypes.Structure reads its field layout from the `_fields_` attribute;
    # the previous name (`lowerCAmelCase__`) left the struct without any
    # fields, so GetConsoleCursorInfo would have nothing to fill in.
    # Mirrors the Win32 CONSOLE_CURSOR_INFO struct (dwSize, bVisible).
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def _UpperCAmelCase ():
    """Hide the terminal cursor (Windows console API, or ANSI escape on POSIX).

    Restored from the garbled original, which wrote every value to the same
    local, called the non-existent ``ctypes.windll.kernelaa`` and referenced
    the undefined names ``CursorInfo`` and ``UpperCamelCase_``.
    """
    if os.name == "nt":
        cursor_info = __snake_case()
        # -11 is STD_OUTPUT_HANDLE.
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25l""" )
        sys.stdout.flush()
def _UpperCAmelCase ():
    """Show the terminal cursor again (Windows console API, or ANSI escape on POSIX).

    Restored from the garbled original, which wrote every value to the same
    local, called the non-existent ``ctypes.windll.kernelaa`` and referenced
    the undefined names ``CursorInfo`` and ``UpperCamelCase_``.
    """
    if os.name == "nt":
        cursor_info = __snake_case()
        # -11 is STD_OUTPUT_HANDLE.
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25h""" )
        sys.stdout.flush()
@contextmanager
def _UpperCAmelCase ():
    """Context manager that hides the cursor for the duration of the block.

    NOTE(review): ``hide_cursor`` and ``show_cursor`` are not defined anywhere
    in this module (the two defs above were both renamed to
    ``_UpperCAmelCase`` and shadow each other), so entering this context
    manager raises NameError as written -- restore the intended names.
    """
    try:
        hide_cursor()
        yield
    finally:
        # Always restore the cursor, even if the body raised.
        show_cursor()
| 159 | 1 |
# NOTE(review): all three module constants are bound to the same mangled name
# `lowercase__`, so each assignment clobbers the previous one; the bodies of
# the functions below read them as `ks`, `base` and `memo` respectively.
# (Annotations changed from the undefined `Tuple`/`Optional[int]`/`Any` --
# which raised NameError at import -- to valid builtins.)
lowercase__ :range = range(2, 20 + 1)  # digit-group sizes k = 2..20
lowercase__ :list = [10**k for k in range(ks[-1] + 1)]  # powers of ten up to 10**20
lowercase__ :dict = {}  # memo cache used by next_term: digit-sum -> jump table
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Advance the digit-sum sequence using cached "jumps" at digit-group
    size k, returning (value delta, number of terms skipped).

    NOTE(review): the four parameters all share one name -- a SyntaxError as
    written; the body reads them as `a_i`, `k`, `i`, `n`. Locals are mangled
    to `lowercase` and several call arguments to `__UpperCamelCase`; the
    recursive calls target `next_term`/`compute`/`add`, none of which are
    defined under those names in this module (each def here was renamed to
    `UpperCamelCase`). Restore the intended names before running.
    '''
    # ds_b: digit sum of the high part b; ds_c: digit sum of the low part c
    # (a_i represents the current term as b * 10^k + c, little-endian digits).
    lowercase = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
    lowercase = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
    lowercase , lowercase = 0, 0
    lowercase = n - i
    # Look up a previously cached jump table for this digit sum.
    lowercase = memo.get(__UpperCamelCase )
    if sub_memo is not None:
        lowercase = sub_memo.get(__UpperCamelCase )
        if jumps is not None and len(__UpperCamelCase ) > 0:
            # find and make the largest jump without going over
            lowercase = -1
            for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    lowercase = _k
                    break
            if max_jump >= 0:
                lowercase , lowercase , lowercase = jumps[max_jump]
                # since the difference between jumps is cached, add c
                lowercase = diff + c
                for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
                    lowercase , lowercase = divmod(__UpperCamelCase , 10 )
                if new_c > 0:
                    add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        else:
            lowercase = []
    else:
        lowercase = {c: []}
        lowercase = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            lowercase , lowercase = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        lowercase , lowercase = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
        diff += _diff
        dn += terms_jumped
    lowercase = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    lowercase = 0
    while j < len(__UpperCamelCase ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
    return (diff, dn)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Advance the digit-sum sequence term by term (no jump cache), returning
    (value delta, number of terms computed).

    NOTE(review): the four parameters all share one name -- a SyntaxError as
    written; the body reads them as `a_i`, `k`, `i`, `n`. Locals are mangled
    to `lowercase`, several call arguments to `__UpperCamelCase`, and the
    carry helper is called as `add`, which is not defined under that name in
    this module. Restore the intended names before running.
    '''
    if i >= n:
        return 0, i
    if k > len(__UpperCamelCase ):
        # Pad the digit list with zeros up to k digits.
        a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    lowercase = i
    lowercase , lowercase , lowercase = 0, 0, 0
    for j in range(len(__UpperCamelCase ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        # Each step adds the full digit sum of the current term.
        lowercase = ds_c + ds_b
        diff += addend
        lowercase = 0
        for j in range(__UpperCamelCase ):
            lowercase = a_i[j] + addend
            lowercase , lowercase = divmod(__UpperCamelCase , 10 )
            ds_c += a_i[j]
            if addend > 0:
                break
        if addend > 0:
            add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
    return diff, i - start_i
def UpperCamelCase ( digits , k , addend ):
    """Add ``addend`` into the little-endian digit list ``digits``, starting
    at digit index ``k``, carrying as needed and appending new high digits.

    Mutates ``digits`` in place and returns None.

    Restored from the garbled original, whose three parameters all shared the
    name ``lowerCAmelCase__`` -- a SyntaxError -- and whose locals were all
    mangled to ``lowercase``.
    """
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient , digits[j] = divmod(s , 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    # Whatever carry is left becomes new most-significant digits.
    while addend > 0:
        addend , digit = divmod(addend , 10 )
        digits.append(digit )


# Sibling functions in this file invoke this helper by the name `add`
# (which is otherwise undefined because the def was renamed); restore it.
add = UpperCamelCase
def UpperCamelCase ( lowerCAmelCase__ = 10**15 ):
    '''Return the n-th term (default n = 10**15) of the digit-sum sequence
    a(i+1) = a(i) + digitsum(a(i)), assembled from the little-endian digit
    list built by the jump routine.

    NOTE(review): the parameter is read as `n` in the body, locals are
    mangled to `lowercase`, and the call targets `next_term`, which is not
    defined under that name in this module (the def above was renamed to
    `UpperCamelCase`), so this raises NameError as written.
    '''
    lowercase = [1]
    lowercase = 1
    lowercase = 0
    while True:
        lowercase , lowercase = next_term(__UpperCamelCase , 20 , i + dn , __UpperCamelCase )
        dn += terms_jumped
        if dn == n - i:
            break
    # Reassemble the integer from its little-endian digits.
    lowercase = 0
    for j in range(len(__UpperCamelCase ) ):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module (the final def
    # above is bound to `UpperCamelCase`), so this raises NameError as written.
    print(F'{solution() = }')
| 101 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class snake_case :
    '''Synthetic regression dataset: y = a * x + b + N(0, 0.1) noise.

    Restored from the garbled original, whose ``__init__`` parameters all
    shared the name ``_lowerCamelCase`` (a SyntaxError) and whose attribute
    assignments all targeted the same local; ``np.floataa`` is the digit-
    garbled form of ``np.float32``.
    '''
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        '''Draw `length` float32 samples x ~ N(0, 1) and targets y = a*x + b + noise.'''
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
    def __len__( self ):
        '''Number of samples in the dataset.'''
        return self.length
    def __getitem__( self , i ):
        '''Return sample `i` as a {"x": ..., "y": ...} dict (DataLoader-friendly).'''
        return {"x": self.x[i], "y": self.y[i]}
class snake_case ( torch.nn.Module ):
    """Toy regression module whose parameters are fixed at [2, 3]; the
    constructor arguments are accepted for signature parity but unused.

    Bug fix: the obfuscated ``__init__`` declared duplicate parameter names
    (a SyntaxError) and dropped the ``self.`` assignment targets.
    NOTE(review): the compute method was presumably named ``forward``
    upstream; the obfuscated name is kept to preserve this file's interface.
    """

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def _SCREAMING_SNAKE_CASE(self, x=None):
        # Log dtypes once, on the first batch only.
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class snake_case ( torch.nn.Module ):
    """Toy regression module computing x * a + b with learnable scalars.

    Bug fix: the obfuscated ``__init__`` declared duplicate parameter names
    (a SyntaxError) and dropped the ``self.`` assignment targets.
    NOTE(review): the compute method was presumably named ``forward``
    upstream; the obfuscated name is kept to preserve this file's interface.
    """

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def _SCREAMING_SNAKE_CASE(self, x=None):
        # Log dtypes once, on the first batch only.
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def lowerCAmelCase(accelerator, batch_size=16):
    """Build (train, eval) DataLoaders over the sample MRPC CSV files.

    The ``accelerator`` decides the padding strategy in ``collate_fn``
    (fixed-length padding on TPU, longest-in-batch elsewhere).

    NOTE(review): restored from an obfuscated version whose two parameters
    shared one name (a SyntaxError); ``batch_size`` is accepted but the
    loaders use the hard-coded sizes 2 (train) and 1 (eval), as in the
    original body.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
| 266 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the obfuscation bound the logger to `_A` and then clobbered
# it with the URL map, leaving the `logger` reference in the config class
# below a NameError.  Bind the logger under the name the class uses and keep
# `_A` as the pretrained-config URL map (its final value in the original).
logger = logging.get_logger(__name__)
_A = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class A(PretrainedConfig):
    """Transformer-XL style configuration.

    NOTE(review): reconstructed from an obfuscated version in which the base
    class was the undefined name ``__UpperCAmelCase``, the three class
    attributes shared one name, and every ``__init__`` parameter was declared
    as ``UpperCamelCase__`` (a SyntaxError) with the ``self.`` assignment
    targets dropped.  Attribute names follow the ``PretrainedConfig``
    contract, grounded by the ``attribute_map`` values and the
    ``self.model_type`` / ``@max_position_embeddings.setter`` references that
    survived the obfuscation.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],  # copied into a fresh list below, so the mutable default is safe
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Share all adaptive-softmax projections except the first cluster.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Transformer-XL has no fixed sequence-length limit.
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 370 |
import pprint
import requests
_A = '''https://zenquotes.io/api'''
def __UpperCamelCase():
    """Fetch the quote of the day from the zenquotes API as parsed JSON."""
    url = API_ENDPOINT_URL + "/today"
    response = requests.get(url)
    return response.json()
def __UpperCamelCase():
    """Fetch a random quote from the zenquotes API as parsed JSON."""
    url = API_ENDPOINT_URL + "/random"
    response = requests.get(url)
    return response.json()
if __name__ == "__main__":
    # NOTE(review): `random_quotes` and `response` do not exist in this
    # module — both fetchers were renamed to `__UpperCamelCase`, so the
    # surviving binding is the /random endpoint; call and print that one.
    response = __UpperCamelCase()
    pprint.pprint(response)
| 167 | 0 |
def A(numerator=1, digit=1000):
    """Return d in [numerator, digit] whose fraction numerator/d has the
    longest recurring decimal cycle (Project Euler 26, generalised).

    Successive remainders of the long division are tracked; the first
    repeated remainder closes the cycle, whose length is the number of
    distinct remainders seen.

    Bug fix: the obfuscated original declared both parameters as ``a_``
    (a SyntaxError) and referenced undefined loop names.  A ``break`` is
    added once the cycle is detected — after the first repeat no state can
    change, so the result is identical and the loop just ran on uselessly.

    >>> A(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
                break  # remainder repeated: cycle found, nothing more can change
            has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 71 |
import re
def A(a_: str) -> bool:
    """Return True if ``a_`` is a valid Sri Lankan mobile phone number.

    Accepts the prefixes 0 / 94 / +94 / 0094, followed by 7 and a second
    digit in {0,1,2,4,5,6,7,8}, an optional separator (space or dash) and
    exactly seven digits.
    """
    # Bug fix: the original called re.search(a_, a_), using the input string
    # as both the pattern and the subject; search with the compiled pattern.
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(pattern.search(a_))
if __name__ == "__main__":
    A_ = "0094702343221"
    # NOTE(review): `is_sri_lankan_phone_number` and `phone` were renamed by
    # the obfuscation; call the validator and constant that actually exist.
    print(A(A_))
| 71 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __UpperCAmelCase ( __a : List[Any] ) -> List[str]:
"""simple docstring"""
_a : Optional[Any] = filter(lambda __a : p.requires_grad ,model.parameters() )
_a : str = sum([np.prod(p.size() ) for p in model_parameters] )
return params
a__ = logging.getLogger(__name__)
def __UpperCAmelCase ( __a : Optional[int] ,__a : Any ) -> Union[str, Any]:
"""simple docstring"""
if metric == "rouge2":
_a : Tuple = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
_a : str = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
_a : List[Any] = '''{val_avg_em:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
_a : int = ModelCheckpoint(
dirpath=__a ,filename=__a ,monitor=F"""val_{metric}""" ,mode='''max''' ,save_top_k=3 ,every_n_epochs=1 ,)
return checkpoint_callback
def __UpperCAmelCase(metric, patience):
    """Build an EarlyStopping callback on ``val_<metric>``.

    Uses mode "min" when the metric name contains "loss", "max" otherwise.
    NOTE(review): the obfuscated original declared both parameters as
    ``__a`` (a SyntaxError).
    """
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class UpperCAmelCase_(pl.Callback):
    """Lightning callback that logs learning rates and metrics, and writes
    seq2seq results/generations to the experiment output directory.

    NOTE(review): in the obfuscated original every method was named
    ``__lowercase`` — so all but the last shadowed each other — and the
    bodies referenced the undefined name ``lowerCamelCase_``.  Since the
    class is a ``pl.Callback``, the standard hook names are restored; they
    are the only names Lightning will actually invoke.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the learning rate of every optimizer param group.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        """Append callback metrics (and optionally generations) to text files."""
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        # Trainable-parameter count inlined here: the module-level helper's
        # obfuscated name is ambiguous in this file (several defs share it).
        n_trainable_pars = sum(np.prod(p.size()) for p in pl_module.parameters() if p.requires_grad)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 354 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
"""simple docstring"""
if rng is None:
_a : Dict = global_rng
_a : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_(unittest.TestCase):
    """Configuration holder used by the TVLT feature-extractor tests.

    NOTE(review): the obfuscated ``__init__`` declared every parameter as
    ``_a`` (a SyntaxError) and both helper methods shared the name
    ``__lowercase`` (so the first was shadowed); the standard mixin method
    names are restored below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive input lengths when building a ragged batch.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        # `__UpperCAmelCase` is this module's floats_list helper (obfuscated name).
        if equal_length:
            speech_inputs = [__UpperCAmelCase((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                __UpperCAmelCase((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            # Bug fix: the obfuscated version called np.asarray on an undefined name.
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
    """Tests for ``TvltFeatureExtractor``: attribute presence, save/load
    round-trips (directory and JSON file), padding/batching behavior and a
    reference integration check on a LibriSpeech sample.

    NOTE(review): this class is obfuscation-damaged — the first base class
    ``__lowercase`` is undefined (presumably the mixin imported at the top
    of the file), ``TvltFeatureExtractionTester`` is not defined under that
    name here, every test method shares the name ``__lowercase``, and the
    ``_a`` assignments shadow the intended local names that later lines
    still read (``feat_extract_first`` etc.).  Code left byte-identical
    pending reconstruction against the upstream test file.
    """
    UpperCAmelCase__ : List[Any] = TvltFeatureExtractor  # class under test (mixin convention)
    def __lowercase ( self ) -> Dict:
        # setUp-style hook: builds the config helper (name unresolved here).
        _a : List[str] = TvltFeatureExtractionTester(self )
    def __lowercase ( self ) -> Any:
        # The extractor must expose every configuration attribute.
        _a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
        self.assertTrue(hasattr(_a , '''feature_size''' ) )
        self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
        self.assertTrue(hasattr(_a , '''hop_length''' ) )
        self.assertTrue(hasattr(_a , '''chunk_length''' ) )
        self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
    def __lowercase ( self ) -> Optional[int]:
        # save_pretrained -> from_pretrained round-trip keeps the config;
        # mel filter banks are compared numerically, the rest exactly.
        _a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _a : int = feat_extract_first.save_pretrained(_a )[0]
            check_json_file_has_correct_format(_a )
            _a : Dict = self.feature_extraction_class.from_pretrained(_a )
        _a : List[Any] = feat_extract_first.to_dict()
        _a : Union[str, Any] = feat_extract_second.to_dict()
        _a : Any = dict_first.pop('''mel_filters''' )
        _a : int = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(_a , _a ) )
        self.assertEqual(_a , _a )
    def __lowercase ( self ) -> Optional[int]:
        # to_json_file -> from_json_file round-trip, same comparison scheme.
        _a : Any = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
            feat_extract_first.to_json_file(_a )
            _a : List[str] = self.feature_extraction_class.from_json_file(_a )
        _a : List[Any] = feat_extract_first.to_dict()
        _a : Dict = feat_extract_second.to_dict()
        _a : str = dict_first.pop('''mel_filters''' )
        _a : str = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(_a , _a ) )
        self.assertEqual(_a , _a )
    def __lowercase ( self ) -> Union[str, Any]:
        # Initialize feature_extractor
        _a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        _a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        _a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
        # Test not batched input
        _a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        _a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        _a : Union[str, Any] = feature_extractor(
            _a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        _a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        _a : int = np.asarray(_a )
        _a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def __lowercase ( self , _a ) -> Optional[Any]:
        # Load `num_samples` LibriSpeech validation clips, sorted by id.
        _a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        _a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def __lowercase ( self ) -> int:
        # Integration check against reference spectrogram values.
        _a : Union[str, Any] = self._load_datasamples(1 )
        _a : int = TvltFeatureExtractor()
        _a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
        self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
        _a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCamelCase : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute a (height, width) resize target for ``input_image``.

    Optionally preserves aspect ratio (scaling as little as possible) and
    constrains both sides to be a multiple of ``multiple``.

    Bug fix: the obfuscated original declared all four parameters as
    ``lowercase_`` (a SyntaxError) and tested ``isinstance(x, x)``.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        # Round down if we overshot an upper bound, up if we fell below the lower one.
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class UpperCamelCase_ ( _UpperCAmelCase ):
    """DPT-style image processor: resize (optionally aspect-preserving and
    multiple-constrained), rescale and normalize pixel values, plus a
    semantic-segmentation post-processing helper.

    NOTE(review): obfuscation damage throughout — the base class
    ``_UpperCAmelCase`` is undefined (presumably ``BaseImageProcessor``
    imported above), and nearly every argument reference in the method
    bodies was replaced with the undefined name ``A_``.  Code left
    byte-identical pending reconstruction against the upstream processor.
    """
    # Only the pixel values are model inputs.
    UpperCAmelCase__ = ['''pixel_values''']
    def __init__( self : str , UpperCAmelCase__ : Tuple = True , UpperCAmelCase__ : Optional[Any] = None , UpperCAmelCase__ : Optional[int] = PILImageResampling.BILINEAR , UpperCAmelCase__ : Optional[Any] = False , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : Dict = True , UpperCAmelCase__ : Dict = 1 / 255 , UpperCAmelCase__ : Tuple = True , UpperCAmelCase__ : Any = None , UpperCAmelCase__ : List[Any] = None , **UpperCAmelCase__ : Union[str, Any] , ) ->None:
        """Store resize/rescale/normalize defaults (size defaults to 384x384).

        NOTE(review): every parameter is declared as ``UpperCAmelCase__``,
        so the positional reads below (``size``, ``do_resize``...) are
        unresolved names in this obfuscated form.
        """
        super().__init__(**A_)
        A__ = size if size is not None else {'''height''': 384, '''width''': 384}
        A__ = get_size_dict(A_)
        A__ = do_resize
        A__ = size
        A__ = keep_aspect_ratio
        A__ = ensure_multiple_of
        A__ = resample
        A__ = do_rescale
        A__ = rescale_factor
        A__ = do_normalize
        A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] = False , UpperCAmelCase__ : Tuple = 1 , UpperCAmelCase__ : Optional[Any] = PILImageResampling.BICUBIC , UpperCAmelCase__ : Dict = None , **UpperCAmelCase__ : List[str] , ) ->np.ndarray:
        """Resize an image to ``size``, via the module-level output-size helper."""
        A__ = get_size_dict(A_)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        A__ = get_resize_output_image_size(
            A_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=A_ , multiple=A_ , )
        return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_)
    def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] = None , **UpperCAmelCase__ : int , ) ->Dict:
        """Rescale pixel values by a scalar factor (delegates to `rescale`)."""
        return rescale(A_ , scale=A_ , data_format=A_ , **A_)
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int = None , **UpperCAmelCase__ : Optional[int] , ) ->np.ndarray:
        """Normalize pixel values with mean/std (delegates to `normalize`)."""
        return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_)
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int = None , UpperCAmelCase__ : List[str] = None , UpperCAmelCase__ : Union[str, Any] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Union[str, Any] = None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Tuple = None , UpperCAmelCase__ : List[Any] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : List[str] = None , UpperCAmelCase__ : Dict = None , UpperCAmelCase__ : List[str] = ChannelDimension.FIRST , **UpperCAmelCase__ : List[Any] , ) ->PIL.Image.Image:
        """Full preprocessing pipeline: validate inputs, then resize /
        rescale / normalize each image and pack them into a BatchFeature."""
        A__ = do_resize if do_resize is not None else self.do_resize
        A__ = size if size is not None else self.size
        A__ = get_size_dict(A_)
        A__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        A__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        A__ = resample if resample is not None else self.resample
        A__ = do_rescale if do_rescale is not None else self.do_rescale
        A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
        A__ = do_normalize if do_normalize is not None else self.do_normalize
        A__ = image_mean if image_mean is not None else self.image_mean
        A__ = image_std if image_std is not None else self.image_std
        A__ = make_list_of_images(A_)
        if not valid_images(A_):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # All transformations expect numpy arrays.
        A__ = [to_numpy_array(A_) for image in images]
        if do_resize:
            A__ = [self.resize(image=A_ , size=A_ , resample=A_) for image in images]
        if do_rescale:
            A__ = [self.rescale(image=A_ , scale=A_) for image in images]
        if do_normalize:
            A__ = [self.normalize(image=A_ , mean=A_ , std=A_) for image in images]
        A__ = [to_channel_dimension_format(A_ , A_) for image in images]
        A__ = {'''pixel_values''': images}
        return BatchFeature(data=A_ , tensor_type=A_)
    def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str = None) ->Any:
        """Turn model logits into per-image semantic segmentation maps,
        optionally resized (bilinear) to the given target sizes."""
        A__ = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(A_) != len(A_):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''')
            if is_torch_tensor(A_):
                A__ = target_sizes.numpy()
            A__ = []
            for idx in range(len(A_)):
                A__ = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_)
                A__ = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(A_)
        else:
            A__ = logits.argmax(dim=1)
            A__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 14 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_UpperCamelCase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_UpperCamelCase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_UpperCamelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : List[str] = len([g for position, g in enumerate(lowercase__ ) if g == main_target[position]] )
return (item, float(lowercase__ ))
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : str = random.randint(0 , len(lowercase__ ) - 1 )
__lowerCAmelCase : int = parent_a[:random_slice] + parent_a[random_slice:]
__lowerCAmelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : List[str] = list(lowercase__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__lowerCAmelCase : int = random.choice(lowercase__ )
return "".join(lowercase__ )
# NOTE(review): obfuscation damage — the three parameters share one name
# (a SyntaxError) and the body reads `parent_a`, `child_n`, `pop`,
# `population_score`, `crossover` and `mutate`, none of which exist under
# those names in this module.  Left byte-identical pending reconstruction.
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , ):
    # Selection step of the GA: breed children from one scored parent,
    # proportionally to its fitness, applying crossover then mutation.
    __lowerCAmelCase : str = []
    # Generate more children proportionally to the fitness score.
    __lowerCAmelCase : str = int(parent_a[1] * 1_0_0 ) + 1
    __lowerCAmelCase : Optional[Any] = 1_0 if child_n >= 1_0 else child_n
    for _ in range(lowercase__ ):
        __lowerCAmelCase : List[Any] = population_score[random.randint(0 , lowercase__ )][0]
        __lowerCAmelCase, __lowerCAmelCase : Dict = crossover(parent_a[0] , lowercase__ )
        # Append new string to the population list.
        pop.append(mutate(lowercase__ , lowercase__ ) )
        pop.append(mutate(lowercase__ , lowercase__ ) )
    return pop
# NOTE(review): obfuscation damage — the parameters share one name
# (a SyntaxError) and the body reads `target`, `genes`, `population`,
# `generation`, `total_population`, `population_score`, `evaluate`,
# `select`, `N_POPULATION` and `N_SELECTED`, none of which exist under
# those names in this module.  Left byte-identical pending reconstruction.
def _lowercase ( lowercase__ , lowercase__ , lowercase__ = True ):
    # Main GA driver: evolve a random population until it exactly matches
    # the target string; returns (generation, total_population, best).
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        __lowerCAmelCase : int = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(lowercase__ )
    # Verify that the target contains no genes besides the ones inside genes variable.
    __lowerCAmelCase : Any = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        __lowerCAmelCase : List[str] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(lowercase__ )
    # Generate random starting population.
    __lowerCAmelCase : List[Any] = []
    for _ in range(lowercase__ ):
        population.append(''''''.join([random.choice(lowercase__ ) for i in range(len(lowercase__ ) )] ) )
    # Just some logs to know what the algorithms is doing.
    __lowerCAmelCase, __lowerCAmelCase : Tuple = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(lowercase__ )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        __lowerCAmelCase : Any = [evaluate(lowercase__ , lowercase__ ) for item in population]
        # Check if there is a matching evolution.
        __lowerCAmelCase : Union[str, Any] = sorted(lowercase__ , key=lambda lowercase__ : x[1] , reverse=lowercase__ )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 1_0 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population:{total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}""" )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        __lowerCAmelCase : Tuple = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(lowercase__ )
        # Normalize population score to be between 0 and 1.
        __lowerCAmelCase : List[Any] = [
            (item, score / len(lowercase__ )) for item, score in population_score
        ]
        # This is selection
        for i in range(lowercase__ ):
            population.extend(select(population_score[int(lowercase__ )] , lowercase__ , lowercase__ ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(lowercase__ ) > N_POPULATION:
                break
# NOTE(review): obfuscation damage — all three bindings below use the name
# `_UpperCamelCase` (so the target string and gene list are clobbered), and
# `basic`, `target_str`, `genes_list`, `generation`, `population` and
# `target` are unresolved names.  Left byte-identical pending reconstruction.
if __name__ == "__main__":
    # Demo run: evolve a random string toward the target sentence.
    _UpperCamelCase = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    _UpperCamelCase = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = basic(target_str, genes_list)
    print(
        F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 275 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# NOTE(review): the obfuscated original wrote `A , A : int = os.path.split(...)`
# (annotated multi-target assignment is a SyntaxError) and the path join read
# undefined names; the benchmark body below reads SPEED_TEST_N_EXAMPLES.
SPEED_TEST_N_EXAMPLES = 500000
A = SPEED_TEST_N_EXAMPLES  # preserve the obfuscated alias for the count
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
# Results land next to this script, under results/<script>.json.
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def _lowerCamelCase(dataset, **kwargs):
    """Timed wrapper around ``dataset.map`` (duration via @get_duration).

    Bug fix: the obfuscated original declared the positional and **kwargs
    parameters under one name (a SyntaxError).
    """
    _ = dataset.map(**kwargs)
@get_duration
def _lowerCamelCase(dataset, **kwargs):
    """Timed wrapper around ``dataset.filter`` (duration via @get_duration).

    Bug fix: the obfuscated original declared the positional and **kwargs
    parameters under one name (a SyntaxError).
    """
    _ = dataset.filter(**kwargs)
def _lowerCamelCase ( ):
    """Benchmark datasets.map/filter over a generated 500k-example dataset
    and dump the timings as JSON.

    NOTE(review): obfuscation damage — the body reads `tmp_dir`, `features`,
    `dataset`, `tokenizer` and calls `map`/`filter` wrappers and `tokenize`
    via unresolved names, and every timing result is assigned to the same
    throwaway local instead of a times dict.  Left byte-identical pending
    reconstruction against the upstream benchmark script.
    """
    __lowerCAmelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        __lowerCAmelCase = datasets.Features({"text": datasets.Value("string" ), "numbers": datasets.Value("float32" )} )
        __lowerCAmelCase = generate_example_dataset(
            os.path.join(_UpperCamelCase , "dataset.arrow" ) , _UpperCamelCase , num_examples=_UpperCamelCase )
        __lowerCAmelCase = transformers.AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=_UpperCamelCase )
        def tokenize(_UpperCamelCase ):
            return tokenizer(examples["text"] )
        __lowerCAmelCase = map(_UpperCamelCase )
        __lowerCAmelCase = map(_UpperCamelCase , batched=_UpperCamelCase )
        __lowerCAmelCase = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase )
        with dataset.formatted_as(type="numpy" ):
            __lowerCAmelCase = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase )
        with dataset.formatted_as(type="pandas" ):
            __lowerCAmelCase = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase )
        with dataset.formatted_as(type="torch" , columns="numbers" ):
            __lowerCAmelCase = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase )
        with dataset.formatted_as(type="tensorflow" , columns="numbers" ):
            __lowerCAmelCase = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase )
        __lowerCAmelCase = map(_UpperCamelCase , function=_UpperCamelCase , batched=_UpperCamelCase )
        __lowerCAmelCase = filter(_UpperCamelCase )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(_UpperCamelCase , "wb" ) as f:
        f.write(json.dumps(_UpperCamelCase ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 259 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
# Module-level logger used by the processor class below (as ``logger``
# upstream; NOTE(review): here it is bound to the obfuscated name ``A``).
A : Any = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Processor coupling an ``AutoTokenizer`` with optional on-disk speaker-embedding voice presets.

    NOTE(review): heavy obfuscation damage in this class — several methods
    share the name ``snake_case`` (each later def shadows the earlier ones),
    many results are bound to the same throwaway local instead of the names
    the following lines read (``speaker_embeddings_path``, ``tokenizer``,
    ``voice_preset_paths``, ``path``, ...), and every method signature below
    declares the same parameter name more than once, which is a SyntaxError.
    Treat the inline notes as reconstruction hints, not guarantees.
    """

    # ProcessorMixin configuration: tokenizer class and managed attributes.
    __UpperCAmelCase : int ="""AutoTokenizer"""
    __UpperCAmelCase : Union[str, Any] =["""tokenizer"""]
    # Expected ndarray rank for each voice-preset component.
    __UpperCAmelCase : Tuple ={
        """semantic_prompt""": 1,
        """coarse_prompt""": 2,
        """fine_prompt""": 2,
    }
    def __init__( self , __a , __a=None ):
        # NOTE(review): duplicate parameter name -> SyntaxError; presumably
        # ``(self, tokenizer, speaker_embeddings=None)`` upstream — confirm.
        super().__init__(__a )
        __lowerCAmelCase = speaker_embeddings
    @classmethod
    def snake_case ( cls , __a , __a="speaker_embeddings_path.json" , **__a ):
        # Alternate constructor: load tokenizer and (optionally) the JSON
        # dictionary describing where each speaker embedding lives.
        if speaker_embeddings_dict_path is not None:
            __lowerCAmelCase = get_file_from_repo(
                __a , __a , subfolder=kwargs.pop("subfolder" , __a ) , cache_dir=kwargs.pop("cache_dir" , __a ) , force_download=kwargs.pop("force_download" , __a ) , proxies=kwargs.pop("proxies" , __a ) , resume_download=kwargs.pop("resume_download" , __a ) , local_files_only=kwargs.pop("local_files_only" , __a ) , use_auth_token=kwargs.pop("use_auth_token" , __a ) , revision=kwargs.pop("revision" , __a ) , )
            if speaker_embeddings_path is None:
                # Missing file is non-fatal: warn and continue without presets.
                logger.warning(
                    f"`{os.path.join(__a , __a )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
                __lowerCAmelCase = None
            else:
                with open(__a ) as speaker_embeddings_json:
                    __lowerCAmelCase = json.load(__a )
        else:
            __lowerCAmelCase = None
        __lowerCAmelCase = AutoTokenizer.from_pretrained(__a , **__a )
        return cls(tokenizer=__a , speaker_embeddings=__a )
    def snake_case ( self , __a , __a="speaker_embeddings_path.json" , __a="speaker_embeddings" , __a = False , **__a , ):
        # Save every voice preset as .npy files plus an index JSON, then
        # delegate the tokenizer save to ProcessorMixin.save_pretrained.
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(__a , __a , "v2" ) , exist_ok=__a )
            __lowerCAmelCase = {}
            __lowerCAmelCase = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    __lowerCAmelCase = self._load_voice_preset(__a )
                    __lowerCAmelCase = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        # allow_pickle disabled deliberately: arrays only.
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"] , __a , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__a , )
                        __lowerCAmelCase = os.path.join(__a , f"{prompt_key}_{key}.npy" )
                    __lowerCAmelCase = tmp_dict
            with open(os.path.join(__a , __a ) , "w" ) as fp:
                json.dump(__a , __a )
        super().save_pretrained(__a , __a , **__a )
    def snake_case ( self , __a = None , **__a ):
        # Resolve and load the three prompt arrays of one named voice preset.
        __lowerCAmelCase = self.speaker_embeddings[voice_preset]
        __lowerCAmelCase = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
            __lowerCAmelCase = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , __a ) , cache_dir=kwargs.pop("cache_dir" , __a ) , force_download=kwargs.pop("force_download" , __a ) , proxies=kwargs.pop("proxies" , __a ) , resume_download=kwargs.pop("resume_download" , __a ) , local_files_only=kwargs.pop("local_files_only" , __a ) , use_auth_token=kwargs.pop("use_auth_token" , __a ) , revision=kwargs.pop("revision" , __a ) , )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
            __lowerCAmelCase = np.load(__a )
        return voice_preset_dict
    def snake_case ( self , __a = None ):
        # Validate that a preset dict has all three prompts, each an ndarray
        # of the rank recorded in the class-level shape table.
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
    def __call__( self , __a=None , __a=None , __a="pt" , __a=2_56 , __a=False , __a=True , __a=False , **__a , ):
        # Tokenize text and, when requested, attach a (validated) voice
        # preset loaded either from the known presets or from an .npz file.
        if voice_preset is not None and not isinstance(__a , __a ):
            if (
                isinstance(__a , __a )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                __lowerCAmelCase = self._load_voice_preset(__a )
            else:
                if isinstance(__a , __a ) and not voice_preset.endswith(".npz" ):
                    __lowerCAmelCase = voice_preset + ".npz"
                __lowerCAmelCase = np.load(__a )
        if voice_preset is not None:
            self._validate_voice_preset_dict(__a , **__a )
            __lowerCAmelCase = BatchFeature(data=__a , tensor_type=__a )
        __lowerCAmelCase = self.tokenizer(
            __a , return_tensors=__a , padding="max_length" , max_length=__a , return_attention_mask=__a , return_token_type_ids=__a , add_special_tokens=__a , **__a , )
        if voice_preset is not None:
            __lowerCAmelCase = voice_preset
        return encoded_text
| 259 | 1 |
"""simple docstring"""
import math
def lowercase__(_UpperCAmelCase: int) -> bool:
    """Return True when ``_UpperCAmelCase`` admits a *perfect* partition.

    A number m is perfect here when sqrt(4*m + 1) / 2 + 1 / 2 is an exact
    power of two, i.e. its base-2 logarithm is an integer (Project Euler 207).

    Bug fix: the original compared ``exponent`` against an undefined
    obfuscated name; the test must compare the exponent with its own
    integer part.
    """
    exponent = math.log2(math.sqrt(4 * _UpperCAmelCase + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def lowercase__(_UpperCAmelCase=1 / 12345) -> int:
    """Project Euler 207: smallest partition value m for which the proportion
    of perfect partitions first drops below ``_UpperCAmelCase``.

    Candidates are m = (k**2 - 1) / 4 for integers k >= 3; a candidate is a
    valid partition only when that expression is an integer.

    Bug fix: the original body compared against undefined obfuscated names
    and called a ``check_partition_perfect`` helper that does not exist in
    this module — the perfect-partition test is inlined as a local helper.
    """

    def _is_perfect_partition(candidate: int) -> bool:
        # m is perfect iff log2(sqrt(4m + 1) / 2 + 1 / 2) is an integer.
        exponent = math.log2(math.sqrt(4 * candidate + 1) / 2 + 1 / 2)
        return exponent == int(exponent)

    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if _is_perfect_partition(partition_candidate):
                perfect_partitions += 1
        # The ratio only changes on valid candidates, so a below-threshold
        # ratio is always first observed on an integer candidate.
        if perfect_partitions > 0 and perfect_partitions / total_partitions < _UpperCAmelCase:
            return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
    # Bug fix: the original printed ``solution()``, a name never defined in
    # this module; the solver above is (after obfuscation) named ``lowercase__``.
    print(f"{lowercase__() = }")
| 255 |
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)
# Lazy-import table: submodule name -> exported names. Optional backends are
# probed below and their entries added only when the backend is importable.
# NOTE(review): obfuscation damage — every addition below rebinds ``__A``
# instead of assigning into ``_import_structure["..."]``, and the final
# _LazyModule call references ``_import_structure``, which is never bound.
__A ={'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: slow tokenizer not exported.
    pass
else:
    __A =['''XGLMTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # tokenizers missing: fast tokenizer not exported.
    pass
else:
    __A =['''XGLMTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: PyTorch models not exported.
    pass
else:
    __A =[
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # flax missing: Flax models not exported.
    pass
else:
    __A =[
        '''FlaxXGLMForCausalLM''',
        '''FlaxXGLMModel''',
        '''FlaxXGLMPreTrainedModel''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # tensorflow missing: TF models not exported.
    pass
else:
    __A =[
        '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXGLMForCausalLM''',
        '''TFXGLMModel''',
        '''TFXGLMPreTrainedModel''',
    ]
# Under static type checking, import everything eagerly so checkers see the
# real symbols; at runtime, fall through to the lazy module instead.
if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )
else:
    import sys
    # Replace this module in sys.modules with a lazy proxy.
    __A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 19 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    """Smoke tests for PyTorchBenchmark on tiny models (sshleifer/tiny-gpt2 and friends).

    NOTE(review): obfuscation damage — every test method is named
    ``SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones, so unittest
    would discover almost nothing), the helper referenced as
    ``self.check_results_dict_not_empty`` does not exist under that name,
    and the helper's body reads ``results`` while its parameter is named
    differently. Restore the upstream method names before relying on this
    suite.
    """
    def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Optional[Any]) ->Tuple:
        """Assert that every (batch_size, sequence_length) cell of a result dict is populated."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
                A__ = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(UpperCAmelCase__)
    def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]:
        """Inference benchmark on tiny-gpt2."""
        A__ = '''sshleifer/tiny-gpt2'''
        A__ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
        A__ = PyTorchBenchmark(UpperCAmelCase__)
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
        """Inference benchmark with only_pretrain_model on a tiny classifier."""
        A__ = '''sgugger/tiny-distilbert-classification'''
        A__ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , only_pretrain_model=UpperCAmelCase__ , )
        A__ = PyTorchBenchmark(UpperCAmelCase__)
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
        """Inference benchmark with torchscript enabled."""
        A__ = '''sshleifer/tiny-gpt2'''
        A__ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , torchscript=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
        A__ = PyTorchBenchmark(UpperCAmelCase__)
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''')
    def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
        """Inference benchmark in fp16 (skipped on CPU)."""
        A__ = '''sshleifer/tiny-gpt2'''
        A__ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
        A__ = PyTorchBenchmark(UpperCAmelCase__)
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
        """Inference benchmark when the config's architectures field is None."""
        A__ = '''sshleifer/tiny-gpt2'''
        A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
        # set architectures equal to `None`
        A__ = None
        A__ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
        A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def SCREAMING_SNAKE_CASE ( self : str) ->List[str]:
        """Training benchmark on tiny-gpt2."""
        A__ = '''sshleifer/tiny-gpt2'''
        A__ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
        A__ = PyTorchBenchmark(UpperCAmelCase__)
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
    def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str:
        """Training benchmark in fp16 (skipped on CPU)."""
        A__ = '''sshleifer/tiny-gpt2'''
        A__ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCAmelCase__ , multi_process=UpperCAmelCase__ , )
        A__ = PyTorchBenchmark(UpperCAmelCase__)
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
        """Inference benchmark driven by an explicit config object."""
        A__ = '''sshleifer/tiny-gpt2'''
        A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
        A__ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
        A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]:
        """Inference benchmark on an encoder-decoder model (tinier_bart)."""
        A__ = '''sshleifer/tinier_bart'''
        A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
        A__ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
        A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]:
        """Training benchmark driven by an explicit config object."""
        A__ = '''sshleifer/tiny-gpt2'''
        A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
        A__ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
        A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
        """Training benchmark on tinier_bart with an explicit config."""
        A__ = '''sshleifer/tinier_bart'''
        A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
        A__ = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
        A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple:
        """Check that save_to_csv writes all five CSV report files."""
        A__ = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , save_to_csv=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase__ , '''inf_time.csv''') , train_memory_csv_file=os.path.join(UpperCAmelCase__ , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(UpperCAmelCase__ , '''inf_mem.csv''') , train_time_csv_file=os.path.join(UpperCAmelCase__ , '''train_time.csv''') , env_info_csv_file=os.path.join(UpperCAmelCase__ , '''env.csv''') , multi_process=UpperCAmelCase__ , )
            A__ = PyTorchBenchmark(UpperCAmelCase__)
            benchmark.run()
            self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''inf_time.csv''')).exists())
            self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''train_time.csv''')).exists())
            self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''inf_mem.csv''')).exists())
            self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''train_mem.csv''')).exists())
            self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''env.csv''')).exists())
    def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
        """Check that memory tracing produces the four summary sections and a log file."""
        A__ = '''sshleifer/tiny-gpt2'''
        def _check_summary_is_not_empty(UpperCAmelCase__ : Tuple):
            # Each summary exposes sequential/cumulative/current/total views.
            self.assertTrue(hasattr(UpperCAmelCase__ , '''sequential'''))
            self.assertTrue(hasattr(UpperCAmelCase__ , '''cumulative'''))
            self.assertTrue(hasattr(UpperCAmelCase__ , '''current'''))
            self.assertTrue(hasattr(UpperCAmelCase__ , '''total'''))
        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase__ , '''log.txt''') , log_print=UpperCAmelCase__ , trace_memory_line_by_line=UpperCAmelCase__ , multi_process=UpperCAmelCase__ , )
            A__ = PyTorchBenchmark(UpperCAmelCase__)
            A__ = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''log.txt''')).exists())
| 362 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"

# Build
# Bug fix: every assignment below originally bound the same throwaway name
# while the uses referenced ``mname``/``config``/``tiny_model``/``batch``/
# ``outputs``/``mname_tiny`` (NameError); the bindings are restored from
# those use sites.
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
| 231 | 0 |
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel(k_size: int, sigma: float):
    """Build a (k_size x k_size) Gaussian kernel centred on the middle cell.

    Bug fix: the original declared the same obfuscated name for both
    parameters (a SyntaxError) and was not reachable under the
    ``gen_gaussian_kernel`` name that ``gaussian_filter`` calls.
    """
    center = k_size // 2
    # Coordinate grids running from -center to k_size - center - 1.
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Apply a Gaussian blur via im2col + dot product; returns a uint8 image
    of shape (H - k_size + 1, W - k_size + 1).

    Bug fix: the original declared one obfuscated name for all three
    parameters (a SyntaxError), dropped the assignment that stores each
    window into the im2col matrix, and cast with the nonexistent numpy name
    ``uinta`` (a dtype string is used instead).
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype("uint8")
    return dst
if __name__ == "__main__":
    # Bug fix: the original rebound every value to one throwaway name while
    # the later calls referenced ``img``/``gray`` (NameError) and showed the
    # same array twice. Requires the OpenCV bindings imported at the top.
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 60 |
"""simple docstring"""
import numpy as np
def _snake_case ( _snake_case : np.array ):
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest
    doctest.testmod()
| 60 | 1 |
# Capacity matrix of the example flow network: test_graph[u][v] is the
# capacity of directed edge u -> v (0 means no edge). The demo below uses
# node 0 as the source and node 5 as the sink.
# Bug fix: the original bound this matrix to a throwaway name while the
# __main__ guard references ``test_graph`` (NameError).
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is node that has not iterated.
    """Breadth-first search over positive-capacity edges of ``graph``.

    Fills ``parent`` with the BFS tree (parent[v] = predecessor of v) and
    returns True iff sink ``t`` is reachable from source ``s``.

    Bug fix: the original declared the same obfuscated name for all four
    parameters (a SyntaxError) and was not reachable under the ``bfs``
    name that ``mincut`` calls.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    """Ford-Fulkerson max-flow, then return the saturated edges of the min cut.

    Mutates ``graph`` into its final residual network; returns the list of
    (u, v) edges that had positive capacity originally and are saturated
    (zero residual) after the max flow is found.

    Bug fix: the original declared one obfuscated name for all three
    parameters (a SyntaxError) and lost most local bindings; they are
    restored from the surviving use sites.
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the augmenting path.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # An original edge whose residual capacity dropped to zero is in the cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
    # Demo: print the min-cut edges of the example network (source 0, sink 5).
    print(mincut(test_graph, source=0, sink=5))
| 201 |
def remove_duplicates(key: str) -> str:
    """Return ``key`` keeping spaces and only the first occurrence of each letter.

    Non-alphabetic, non-space characters are dropped.

    Bug fix: restored under the ``remove_duplicates`` name that
    ``create_cipher_map`` calls (the obfuscated def was unreachable).
    """
    key_no_dups = ""
    for ch in key:
        # Spaces always pass; a letter passes only the first time it is seen.
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict:
    """Build a substitution map: plain uppercase letter -> cipher letter.

    The deduplicated keyword fills the first positions of the cipher
    alphabet; the remaining letters follow from the start of the alphabet,
    skipping letters already used by the keyword.

    Bug fix: restored under the ``create_cipher_map`` name that ``main``
    calls, with the assignment targets lost to obfuscation rebuilt from
    the surviving use sites.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict) -> str:
    """Encipher ``message``: upper-case it and map each character through
    ``cipher_map`` (characters without a mapping pass through unchanged).

    Bug fix: the original declared the same obfuscated name for both
    parameters (a SyntaxError); restored under the ``encipher`` name that
    ``main`` dispatches to.
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message: str, cipher_map: dict) -> str:
    """Decipher ``message`` by applying the inverse of ``cipher_map``
    (characters without a mapping pass through unchanged).

    Bug fix: the original declared the same obfuscated name for both
    parameters (a SyntaxError); restored under the ``decipher`` name that
    ``main`` dispatches to.
    """
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    """Interactive driver: prompt for a message, keyword and mode, then
    print the enciphered or deciphered result.

    Raises:
        KeyError: if the mode is neither 'e' nor 'd'.

    Bug fix: restored under the ``main`` name the __main__ guard calls, with
    the local bindings lost to obfuscation rebuilt from their use sites.
    """
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
    # Run doctests, then start the interactive cipher driver.
    import doctest
    doctest.testmod()
    main()
| 201 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# Bug fix: the 3.10-only dataclasses below are gated on the name
# ``is_python_no_less_than_3_10``; the original bound an unrelated name.
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    """Dataclass field whose default is supplied by a factory returning
    ``default`` (required because mutable defaults are not allowed directly).

    Bug fix: the original declared the same obfuscated name for both
    parameters (a SyntaxError) and was unreachable under the ``list_field``
    name the dataclasses below use.
    """
    return field(default_factory=lambda: default, metadata=metadata)
# NOTE(review): obfuscation damage in this whole section — the annotated
# dataclass fields were replaced by plain ``A_ = ...`` assignments (without
# annotations these are ordinary class attributes, NOT dataclass fields),
# and the same class name is reused for every definition, so each class
# shadows the previous one. Comments only; adding annotations here would
# change dataclass behavior.
@dataclass
class __UpperCamelCase :
    A_ = 42
    A_ = 42
    A_ = 42
    A_ = 42
@dataclass
class __UpperCamelCase :
    A_ = 42
    A_ = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class __UpperCamelCase :
    A_ = False
    A_ = True
    A_ = None
# Enum-style classes (base is the obfuscated ``lowerCAmelCase_``).
class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = "titi"
    A_ = "toto"
class __UpperCamelCase ( lowerCAmelCase_ ):
    A_ = "titi"
    A_ = "toto"
    A_ = 42
@dataclass
class __UpperCamelCase :
    A_ = "toto"
    def __UpperCAmelCase ( self ):
        """Coerce the stored value into a BasicEnum (post-init style hook)."""
        __a : Any = BasicEnum(self.foo )
@dataclass
class __UpperCamelCase :
    A_ = "toto"
    def __UpperCAmelCase ( self ):
        """Coerce the stored value into a MixedTypeEnum (post-init style hook)."""
        __a : Dict = MixedTypeEnum(self.foo )
@dataclass
class __UpperCamelCase :
    A_ = None
    A_ = field(default=lowerCAmelCase_ , metadata={"help": "help message"} )
    A_ = None
    A_ = list_field(default=[] )
    A_ = list_field(default=[] )
@dataclass
class __UpperCamelCase :
    A_ = list_field(default=[] )
    A_ = list_field(default=[1, 2, 3] )
    A_ = list_field(default=["Hallo", "Bonjour", "Hello"] )
    A_ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class __UpperCamelCase :
    A_ = field()
    A_ = field()
    A_ = field()
    def __UpperCAmelCase ( self ):
        """Coerce the required enum value into a BasicEnum (post-init style hook)."""
        __a : List[str] = BasicEnum(self.required_enum )
@dataclass
class __UpperCamelCase :
    A_ = 42
    A_ = field()
    A_ = None
    A_ = field(default="toto" , metadata={"help": "help message"} )
    A_ = list_field(default=["Hallo", "Bonjour", "Hello"] )
# Dataclasses exercising PEP 604 ``X | None`` syntax, only valid on 3.10+.
if is_python_no_less_than_3_10:
    @dataclass
    class __UpperCamelCase :
        A_ = False
        A_ = True
        A_ = None
    @dataclass
    class __UpperCamelCase :
        A_ = None
        A_ = field(default=lowerCAmelCase_ , metadata={"help": "help message"} )
        A_ = None
        A_ = list_field(default=[] )
        A_ = list_field(default=[] )
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__a : Dict = {k: v for k, v in vars(__a ).items() if k != 'container'}
__a : Tuple = {k: v for k, v in vars(__a ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , __a ) and yy.get('choices' , __a ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](__a ) , yy['type'](__a ) )
del xx["type"], yy["type"]
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = HfArgumentParser(__a )
__a : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__a , required=__a )
expected.add_argument('--bar' , type=__a , required=__a )
expected.add_argument('--baz' , type=__a , required=__a )
expected.add_argument('--flag' , type=__a , default=__a , const=__a , nargs='?' )
self.argparsersEqual(__a , __a )
__a : Optional[int] = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((__a) , ) : Any = parser.parse_args_into_dataclasses(__a , look_for_args_file=__a )
self.assertFalse(example.flag )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = HfArgumentParser(__a )
__a : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=__a )
expected.add_argument('--baz' , default='toto' , type=__a , help='help message' )
self.argparsersEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__a , default=__a , const=__a , nargs='?' )
expected.add_argument('--baz' , type=__a , default=__a , const=__a , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=__a , dest='baz' )
expected.add_argument('--opt' , type=__a , default=__a )
__a : str = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__a )
for dataclass_type in dataclass_types:
__a : Dict = HfArgumentParser(__a )
self.argparsersEqual(__a , __a )
__a : List[Any] = parser.parse_args([] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : Tuple = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : List[Any] = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : int = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : Optional[int] = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = HfArgumentParser(__a )
__a : str = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__a , __a )
__a : Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
__a : Tuple = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__a : List[str] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
__a : Tuple = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__a : Optional[int] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
__a : Any = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __UpperCAmelCase ( self ):
    """A ``Literal``-typed field yields the same choices argument as an enum
    field: the string literals and the int literal are all accepted.

    NOTE(review): ``HfArgumentParser(__a)`` lost its argument to
    obfuscation — presumably the locally defined dataclass below.
    """

    @dataclass
    class __UpperCamelCase :
        # Obfuscated field; upstream this is presumably
        # foo: Literal["titi", "toto", 42] = "toto" — confirm.
        A_ = "toto"

    __a : Dict = HfArgumentParser(__a )
    __a : List[str] = argparse.ArgumentParser()
    expected.add_argument(
        '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
    self.argparsersEqual(__a , __a )
    __a : str = parser.parse_args([] )
    self.assertEqual(args.foo , 'toto' )
    __a : Optional[int] = parser.parse_args(['--foo', 'titi'] )
    self.assertEqual(args.foo , 'titi' )
    # The int literal is parsed from its string form '42'.
    __a : Any = parser.parse_args(['--foo', '42'] )
    self.assertEqual(args.foo , 42 )
def __UpperCAmelCase ( self ):
    """List-typed dataclass fields become ``nargs='+'`` arguments with the
    dataclass defaults preserved."""
    __a : List[Any] = HfArgumentParser(__a )
    __a : Any = argparse.ArgumentParser()
    expected.add_argument('--foo_int' , nargs='+' , default=[] , type=__a )
    expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=__a )
    expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__a )
    expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=__a )
    self.argparsersEqual(__a , __a )
    # No args -> dataclass defaults.
    __a : Union[str, Any] = parser.parse_args([] )
    self.assertEqual(
        __a , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
    # Space-separated CLI values populate each list.
    __a : Tuple = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
    self.assertEqual(__a , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def __UpperCAmelCase ( self ):
    """Optional-typed fields default to None and parse int/float/str/list
    values from the command line.

    NOTE(review): several ``__a`` references are self-referential —
    presumably the PEP-604 (``X | None``) variant of the dataclass is
    appended on Python 3.10+; confirm upstream.
    """
    __a : int = argparse.ArgumentParser()
    expected.add_argument('--foo' , default=__a , type=__a )
    expected.add_argument('--bar' , default=__a , type=__a , help='help message' )
    expected.add_argument('--baz' , default=__a , type=__a )
    expected.add_argument('--ces' , nargs='+' , default=[] , type=__a )
    expected.add_argument('--des' , nargs='+' , default=[] , type=__a )
    __a : Any = [OptionalExample]
    if is_python_no_less_than_3_10:
        # On 3.10+ also exercise the `X | None` spelling of Optional.
        dataclass_types.append(__a )
    for dataclass_type in dataclass_types:
        __a : Tuple = HfArgumentParser(__a )
        self.argparsersEqual(__a , __a )
        __a : Tuple = parser.parse_args([] )
        self.assertEqual(__a , Namespace(foo=__a , bar=__a , baz=__a , ces=[] , des=[] ) )
        __a : Tuple = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
        self.assertEqual(__a , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def __UpperCAmelCase ( self ):
    """Dataclass fields without defaults become required arguments
    (list, str and enum variants)."""
    __a : Optional[int] = HfArgumentParser(__a )
    __a : str = argparse.ArgumentParser()
    expected.add_argument('--required_list' , nargs='+' , type=__a , required=__a )
    expected.add_argument('--required_str' , type=__a , required=__a )
    # Enum field: choices list plus a converter from the CLI string.
    expected.add_argument(
        '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__a , )
    self.argparsersEqual(__a , __a )
def __UpperCAmelCase ( self ):
    """A dataclass whose annotations are string literals (PEP 563 style)
    resolves to the same parser as one with concrete type annotations."""
    __a : str = HfArgumentParser(__a )
    __a : Optional[Any] = argparse.ArgumentParser()
    expected.add_argument('--foo' , type=__a , required=__a )
    expected.add_argument(
        '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__a , )
    expected.add_argument('--opt' , type=__a , default=__a )
    expected.add_argument('--baz' , default='toto' , type=__a , help='help message' )
    expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__a )
    self.argparsersEqual(__a , __a )
def __UpperCAmelCase ( self ):
    """parse_dict builds a dataclass instance equal to constructing the
    dataclass (presumably BasicExample) directly from the same dict."""
    __a : Union[str, Any] = HfArgumentParser(__a )
    __a : Optional[Any] = {
        'foo': 12,
        'bar': 3.14,
        'baz': '42',
        'flag': True,
    }
    __a : Optional[Any] = parser.parse_dict(__a )[0]
    __a : str = BasicExample(**__a )
    self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
    """parse_dict raises for keys the dataclass lacks when extra keys are
    not allowed.

    NOTE(review): both the expected exception type and the
    ``allow_extra_keys`` value were obfuscated to ``__a`` — presumably
    ValueError and False; confirm upstream.
    """
    __a : Dict = HfArgumentParser(__a )
    __a : str = {
        'foo': 12,
        'bar': 3.14,
        'baz': '42',
        'flag': True,
        'extra': 42,
    }
    self.assertRaises(__a , parser.parse_dict , __a , allow_extra_keys=__a )
def __UpperCAmelCase ( self ):
    """Round-trip a config dict through a JSON file and ``parse_json_file``.

    Bug fix: the original called ``parser.parse_yaml_file`` on the ``.json``
    file it had just written with ``json.dump``; a JSON payload must go
    through ``parse_json_file`` (the YAML round-trip is covered by the next
    test).

    NOTE(review): the obfuscated ``__a`` references are self-referential —
    presumably the parser wraps ``BasicExample``; confirm upstream.
    """
    __a : Dict = HfArgumentParser(__a )
    __a : Optional[int] = {
        'foo': 12,
        'bar': 3.14,
        'baz': '42',
        'flag': True,
    }
    with tempfile.TemporaryDirectory() as tmp_dir:
        __a : List[str] = os.path.join(__a , 'temp_json' )
        os.mkdir(__a )
        with open(temp_local_path + '.json' , 'w+' ) as f:
            json.dump(__a , __a )
        # Parse the JSON file back into the dataclass and compare with a
        # directly-constructed instance.
        __a : Tuple = parser.parse_json_file(Path(temp_local_path + '.json' ) )[0]
        __a : str = BasicExample(**__a )
        self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
    """Round-trip a config dict through a YAML file and parse_yaml_file."""
    __a : List[str] = HfArgumentParser(__a )
    __a : Union[str, Any] = {
        'foo': 12,
        'bar': 3.14,
        'baz': '42',
        'flag': True,
    }
    with tempfile.TemporaryDirectory() as tmp_dir:
        __a : List[str] = os.path.join(__a , 'temp_yaml' )
        os.mkdir(__a )
        with open(temp_local_path + '.yaml' , 'w+' ) as f:
            yaml.dump(__a , __a )
        # Parse the YAML back and compare with direct construction.
        __a : List[str] = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
        __a : Dict = BasicExample(**__a )
        self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
    """Smoke test: constructing the parser (upstream, from a dataclass with
    string type annotations) succeeds and is not None."""
    __a : Tuple = HfArgumentParser(__a )
    self.assertIsNotNone(__a )
| 27 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( lowercase_ ):
    """Output wrapper for the decoder (a ``BaseOutput`` subclass per the
    ``lowercase_`` base, which is obfuscated).

    NOTE(review): the single field was obfuscated to the bare value ``42`` —
    upstream this is presumably ``sample: torch.FloatTensor``; confirm.
    """
    SCREAMING_SNAKE_CASE__ = 42
class UpperCamelCase__ ( nn.Module ):
    """VAE-style convolutional encoder.

    Pipeline: conv_in -> stack of down blocks -> UNet mid block ->
    GroupNorm + SiLU + conv_out. When ``double_z`` is true the output has
    ``2 * out_channels`` channels (mean and log-variance of a latent
    Gaussian). Supports gradient checkpointing during training.

    NOTE(review): the parameter list was obfuscated — by usage the names are
    presumably ``in_channels, out_channels, down_block_types,
    block_out_channels, layers_per_block, norm_num_groups, act_fn,
    double_z`` as in diffusers' Encoder; confirm before relying on keyword
    order.
    """

    def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
        """Build conv_in, the down blocks, the mid block and the output head."""
        super().__init__()
        SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block
        # Stem: 3x3 conv, stride 1 — keeps spatial resolution.
        SCREAMING_SNAKE_CASE : int = torch.nn.Convad(
            lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        SCREAMING_SNAKE_CASE : List[str] = None
        SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
        # down
        SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
        for i, down_block_type in enumerate(lowerCamelCase_ ):
            SCREAMING_SNAKE_CASE : Any = output_channel
            SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
            # The final block keeps resolution (no downsample).
            SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
            SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
                lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
            self.down_blocks.append(lowerCamelCase_ )
        # mid
        SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
        # out
        SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
        SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
        # double_z doubles the channels to carry (mean, logvar) of a latent.
        SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
        SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
        # Gradient checkpointing is off until enabled externally.
        SCREAMING_SNAKE_CASE : Tuple = False

    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
        """Encode ``x``. Uses torch.utils.checkpoint while training with
        gradient checkpointing enabled; the call signature differs before
        and after torch 1.11 (``use_reentrant`` keyword)."""
        SCREAMING_SNAKE_CASE : Optional[Any] = x
        SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(lowerCamelCase_ : List[Any] ):
                # Wrap the module so checkpoint() re-runs it in backward.
                def custom_forward(*lowerCamelCase_ : List[str] ):
                    return module(*lowerCamelCase_ )

                return custom_forward

            # down
            if is_torch_version(""">=""" , """1.11.0""" ):
                for down_block in self.down_blocks:
                    SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
                # middle
                SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
            else:
                for down_block in self.down_blocks:
                    SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
                # middle
                SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
        else:
            # down
            for down_block in self.down_blocks:
                SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
            # middle
            SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
        # post-process
        SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
        return sample
class UpperCamelCase__ ( nn.Module ):
    """VAE-style convolutional decoder: conv_in -> UNet mid block -> up
    blocks -> (Group/Spatial)Norm + SiLU + conv_out.

    ``norm_type == "spatial"`` switches the output norm to SpatialNorm and
    changes the resnet time-scale shift; gradient checkpointing mirrors the
    Encoder. NOTE(review): parameter names were obfuscated — presumably
    ``in_channels, out_channels, up_block_types, block_out_channels,
    layers_per_block, norm_num_groups, act_fn, norm_type``; confirm
    upstream.
    """

    def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
        """Build conv_in, the mid block, reversed-channel up blocks and the
        output head."""
        super().__init__()
        SCREAMING_SNAKE_CASE : int = layers_per_block
        SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad(
            lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        SCREAMING_SNAKE_CASE : Tuple = None
        SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
        # Spatial norm conditions on the latent; group norm takes no temb.
        SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
        # mid
        SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
        # up — channel widths walk the encoder's widths in reverse.
        SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
        SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(lowerCamelCase_ ):
            SCREAMING_SNAKE_CASE : str = output_channel
            SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
            # The final block keeps resolution (no upsample).
            SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
            SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
                lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
            self.up_blocks.append(lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : List[Any] = output_channel
        # out
        if norm_type == "spatial":
            SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
        else:
            SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
        SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
        SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
        # Gradient checkpointing is off until enabled externally.
        SCREAMING_SNAKE_CASE : Dict = False

    def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
        """Decode latent ``z`` (optionally conditioned on ``latent_embeds``
        for spatial norm); the checkpointing path mirrors the Encoder's
        torch-version split."""
        SCREAMING_SNAKE_CASE : str = z
        SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
        # Cast the sample to the up-blocks' parameter dtype after the mid block.
        SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(lowerCamelCase_ : List[str] ):
                # Wrap the module so checkpoint() re-runs it in backward.
                def custom_forward(*lowerCamelCase_ : str ):
                    return module(*lowerCamelCase_ )

                return custom_forward

            if is_torch_version(""">=""" , """1.11.0""" ):
                # middle
                SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
                SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
                # up
                for up_block in self.up_blocks:
                    SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
            else:
                # middle
                SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
                SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
                # up
                for up_block in self.up_blocks:
                    SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
        else:
            # middle
            SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
            # up
            for up_block in self.up_blocks:
                SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
        # post-process
        if latent_embeds is None:
            SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
        else:
            # SpatialNorm also takes the latent embeddings.
            SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
        return sample
class UpperCamelCase__ ( nn.Module ):
    """VQ-VAE vector quantizer: snaps each latent vector to its nearest
    codebook embedding, with optional remapping of raw indices onto a
    reduced "used" codebook loaded from ``remap``.

    NOTE(review): constructor arguments were obfuscated; by usage they are
    ``n_e, vq_embed_dim, beta, remap, unknown_index, sane_index_shape,
    legacy`` — confirm upstream before relying on the positional order.
    """

    def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
        """Initialize the codebook (uniform in [-1/n_e, 1/n_e]) and, when a
        ``remap`` file is given, the reduced-index remap table."""
        super().__init__()
        SCREAMING_SNAKE_CASE : Tuple = n_e
        SCREAMING_SNAKE_CASE : int = vq_embed_dim
        SCREAMING_SNAKE_CASE : Tuple = beta
        SCREAMING_SNAKE_CASE : Union[str, Any] = legacy
        SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        SCREAMING_SNAKE_CASE : Optional[Any] = remap
        if self.remap is not None:
            # "used" holds the subset of codebook indices kept after remap.
            self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
            SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
            SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                # NOTE(review): obfuscation collapsed two assignments here —
                # upstream: self.unknown_index = self.re_embed;
                #           self.re_embed = self.re_embed + 1. Confirm.
                SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
                SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
            print(
                f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
                f'''Using {self.unknown_index} for unknown indices.''' )
        else:
            # No remap: the effective codebook size is the full n_e.
            SCREAMING_SNAKE_CASE : Optional[int] = n_e
        SCREAMING_SNAKE_CASE : Any = sane_index_shape

    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
        """Map raw codebook indices to positions in the reduced ``used``
        set; indices with no match become ``unknown_index`` (or random)."""
        SCREAMING_SNAKE_CASE : int = inds.shape
        assert len(lowerCamelCase_ ) > 1
        SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
        SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
        # One-hot match against every kept index, then argmax for position.
        SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
        SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
        SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
        if self.unknown_index == "random":
            SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            SCREAMING_SNAKE_CASE : Any = self.unknown_index
        return new.reshape(lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
        """Inverse of remap_to_used: gather reduced indices back to full
        codebook indices through the ``used`` table."""
        SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
        assert len(lowerCamelCase_ ) > 1
        SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
        SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
        if self.re_embed > self.used.shape[0]: # extra token
            SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
        return back.reshape(lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
        """Quantize ``z`` (NCHW): nearest-codebook lookup, commitment loss
        (the beta weighting side depends on ``legacy``) and the
        straight-through gradient estimator."""
        SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
        SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
        SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
        SCREAMING_SNAKE_CASE : Any = None
        SCREAMING_SNAKE_CASE : List[str] = None
        # compute loss for embedding
        if not self.legacy:
            SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
        # reshape back to match original input shape
        SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
            SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
        if self.sane_index_shape:
            SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
        """Look up codebook vectors for ``indices`` and, when a target
        ``shape`` is given, reshape them back to NCHW."""
        if self.remap is not None:
            SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
            SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
            SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
        # get quantized latent vectors
        SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
        if shape is not None:
            SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
            # reshape back to match original input shape
            SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
class UpperCamelCase__ ( lowercase_ ):
    """Diagonal Gaussian over latents, parameterized by a tensor carrying
    mean and logvar concatenated along dim=1. Supports reparameterized
    sampling, KL divergence, NLL and the distribution mode."""

    def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ):
        """Split ``parameters`` into mean/logvar; clamp logvar to [-30, 20].
        ``deterministic`` zeroes the std/var so sampling returns the mean."""
        SCREAMING_SNAKE_CASE : Optional[Any] = parameters
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
        # Clamp keeps exp() below from overflowing/underflowing.
        SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 )
        SCREAMING_SNAKE_CASE : Dict = deterministic
        SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar )
        SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar )
        if self.deterministic:
            # Zero std/var -> sampling degenerates to the mean.
            SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
        """Reparameterized sample: mean + std * eps, matching the
        parameters' device and dtype."""
        SCREAMING_SNAKE_CASE : int = randn_tensor(
            self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
        SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample
        return x

    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ):
        """KL(self || other), against the standard normal when ``other`` is
        None; deterministic distributions have zero KL by construction.
        Reduction sums over dims [1, 2, 3]."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )

    def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ):
        """Negative log-likelihood of ``sample`` under this Gaussian, summed
        over the given dims. (The mutable default is kept for interface
        compatibility.)"""
        if self.deterministic:
            return torch.Tensor([0.0] )
        SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )

    def lowerCamelCase_ ( self : Tuple ):
        """Mode of a Gaussian is its mean."""
        return self.mean
| 323 | 0 |
"""simple docstring"""
__lowercase = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__lowercase = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__lowercase = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def lowerCAmelCase (__UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ):
"""simple docstring"""
assert len(str(a__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 1_2, "month should be between 1 to 12"
assert 1 <= day <= 3_1, "day should be between 1 to 31"
# Doomsday algorithm:
__UpperCamelCase =year // 1_0_0
__UpperCamelCase =(5 * (century % 4) + 2) % 7
__UpperCamelCase =year % 1_0_0
__UpperCamelCase =centurian % 1_2
__UpperCamelCase =(
(centurian // 1_2) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
__UpperCamelCase =(
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_0_0) == 0)
else DOOMSDAY_LEAP[month - 1]
)
__UpperCamelCase =(dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 | """simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# NOTE(review): obfuscation bound both constants to the same name
# ``__lowercase`` while the tests below read them as FILE_CONTENT and
# FILE_PATH — restore those names when de-obfuscating.
__lowercase = '''\
Text data.
Second line of data.'''
__lowercase = '''file'''
@pytest.fixture(scope='''session''' )
def lowerCAmelCase (__UpperCamelCase : Any ):
    """Session fixture: write FILE_CONTENT zstd-compressed into a tmp dir and
    return the file's path.

    NOTE(review): obfuscation collapsed the local names — the parameter is
    used as pytest's ``tmp_path_factory`` and ``return path`` refers to the
    first assignment's value; confirm against the upstream test module.
    """
    __UpperCamelCase =tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
    __UpperCamelCase =bytes(__UpperCamelCase , '''utf-8''' )
    with zstd.open(__UpperCamelCase , '''wb''' ) as f:
        f.write(__UpperCamelCase )
    return path
@pytest.fixture
def lowerCAmelCase (__UpperCamelCase : Any ):
    """Fixture: write FILE_CONTENT into the mock ``tmpfs`` filesystem and
    return FILE_PATH (the in-filesystem name)."""
    with open(os.path.join(tmpfs.local_root_dir , __UpperCamelCase ) , '''w''' ) as f:
        f.write(__UpperCamelCase )
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def lowerCAmelCase (__UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
    """cached_path with extract_compressed_file=True decompresses gzip/xz/zstd
    archives, and the extracted text equals the expected file content.

    NOTE(review): parameter names were obfuscated; by usage they are the
    compressed-file fixtures (gz_file, xz_file, zstd_path), tmp_path and the
    expected-content fixture.
    """
    __UpperCamelCase ={'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    __UpperCamelCase =input_paths[compression_format]
    __UpperCamelCase =tmp_path / '''cache'''
    __UpperCamelCase =DownloadConfig(cache_dir=__UpperCamelCase , extract_compressed_file=__UpperCamelCase )
    __UpperCamelCase =cached_path(__UpperCamelCase , download_config=__UpperCamelCase )
    with open(__UpperCamelCase ) as f:
        __UpperCamelCase =f.read()
    with open(__UpperCamelCase ) as f:
        __UpperCamelCase =f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def lowerCAmelCase (__UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
    """The extraction output lands under the expected directory for every
    combination of default/custom cache dir and default/custom extracted dir.

    NOTE(review): the obfuscated parameters are, by usage: xz_file, tmp_path,
    monkeypatch plus the two parametrized booleans.
    """
    __UpperCamelCase ='''custom_cache'''
    __UpperCamelCase ='''custom_extracted_dir'''
    __UpperCamelCase =tmp_path / '''custom_extracted_path'''
    if default_extracted:
        # Default layout: <cache>/downloads/extracted or <custom>/extracted.
        __UpperCamelCase =('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        # Redirect datasets' extraction config to the custom locations.
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , __UpperCamelCase )
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(__UpperCamelCase ) )
        __UpperCamelCase =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    __UpperCamelCase =xz_file
    __UpperCamelCase =(
        DownloadConfig(extract_compressed_file=__UpperCamelCase )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__UpperCamelCase )
    )
    __UpperCamelCase =cached_path(__UpperCamelCase , download_config=__UpperCamelCase )
    # Only the last two path components are layout-dependent.
    assert Path(__UpperCamelCase ).parent.parts[-2:] == expected
def lowerCAmelCase (__UpperCamelCase : Dict ):
    """cached_path returns local files unchanged, for both absolute and
    relative paths (parameter is presumably the ``text_file`` fixture)."""
    __UpperCamelCase =str(Path(__UpperCamelCase ).resolve() )
    assert cached_path(__UpperCamelCase ) == text_file
    # relative path
    __UpperCamelCase =str(Path(__UpperCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(__UpperCamelCase ) == text_file
def lowerCAmelCase (__UpperCamelCase : int ):
    """cached_path raises for missing local files, absolute or relative
    (expected exception type obfuscated — presumably FileNotFoundError)."""
    __UpperCamelCase =str(tmp_path.resolve() / '''__missing_file__.txt''' )
    with pytest.raises(__UpperCamelCase ):
        cached_path(__UpperCamelCase )
    # relative path
    __UpperCamelCase ='''./__missing_file__.txt'''
    with pytest.raises(__UpperCamelCase ):
        cached_path(__UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Any ):
    """get_from_cache resolves a ``tmp://`` (mock fsspec) URL and the cached
    copy's content matches FILE_CONTENT."""
    __UpperCamelCase =get_from_cache(F"""tmp://{tmpfs_file}""" )
    with open(__UpperCamelCase ) as f:
        __UpperCamelCase =f.read()
    assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __UpperCamelCase )
def lowerCAmelCase ():
    """With HF_DATASETS_OFFLINE patched on, cached_path on a URL must raise.

    NOTE(review): both the patched value and the expected exception were
    obfuscated — presumably True and OfflineModeIsEnabled; confirm.
    """
    with pytest.raises(__UpperCamelCase ):
        cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Dict ):
    """Offline mode: http_get / http_head must raise instead of hitting the
    network (patched value / exception obfuscated — presumably True and
    OfflineModeIsEnabled)."""
    __UpperCamelCase =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(__UpperCamelCase ):
        http_get('''https://huggingface.co''' , temp_file=__UpperCamelCase )
    with pytest.raises(__UpperCamelCase ):
        http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : List[str] ):
    """Offline mode: ftp_get / ftp_head must raise instead of connecting
    (patched value / exception obfuscated — presumably True and
    OfflineModeIsEnabled)."""
    __UpperCamelCase =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(__UpperCamelCase ):
        ftp_get('''ftp://huggingface.co''' , temp_file=__UpperCamelCase )
    with pytest.raises(__UpperCamelCase ):
        ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Dict ):
    """Offline mode: fsspec_get / fsspec_head must raise instead of touching
    remote storage (patched value / exception obfuscated — presumably True
    and OfflineModeIsEnabled)."""
    __UpperCamelCase =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(__UpperCamelCase ):
        fsspec_get('''s3://huggingface.co''' , temp_file=__UpperCamelCase )
    with pytest.raises(__UpperCamelCase ):
        fsspec_head('''s3://huggingface.co''' )
| 85 | 0 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# (dataset, config) pairs mirrored on the HF GCS bucket; consumed below by
# list_datasets_on_hf_gcp_parameters and the parameterized test class.
# NOTE(review): obfuscation renamed this to ``__lowerCAmelCase`` while the
# code reads it as DATASETS_ON_HF_GCP — restore that name.
__lowerCAmelCase = [
    {'dataset': 'wikipedia', 'config_name': '20220301.de'},
    {'dataset': 'wikipedia', 'config_name': '20220301.en'},
    {'dataset': 'wikipedia', 'config_name': '20220301.fr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.frr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.it'},
    {'dataset': 'wikipedia', 'config_name': '20220301.simple'},
    {'dataset': 'snli', 'config_name': 'plain_text'},
    {'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
    {'dataset': 'wiki40b', 'config_name': 'en'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
    {'dataset': 'natural_questions', 'config_name': 'default'},
]
def __SCREAMING_SNAKE_CASE ( with_config=True ):
    """Build ``parameterized.named_parameters`` entries from
    DATASETS_ON_HF_GCP: one (dataset, config) case each, or one case per
    distinct dataset when ``with_config`` is False.

    Bug fix: the parameter had been renamed to ``_SCREAMING_SNAKE_CASE``
    while the body reads ``with_config`` (NameError) and the class decorator
    below passes it by the keyword ``with_config`` — that is the correct
    name.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        # Deduplicate datasets when configs are not being exercised.
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__snake_case ) )
class _lowerCAmelCase ( __snake_case ):
    """Parameterized check that ``dataset_info.json`` for each
    (dataset, config) pair is downloadable from the HF GCS mirror.

    NOTE(review): the obfuscated base class and decorator argument are
    presumably ``TestCase`` and ``True``; confirm upstream.
    """

    lowerCAmelCase_ = None  # dataset name, injected by parameterized
    lowerCAmelCase_ = None  # config name, injected by parameterized

    def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Dict:
        """Resolve the builder for (dataset, config) and fetch its
        dataset_info.json from HF_GCP_BASE_URL into a temporary cache."""
        with TemporaryDirectory() as tmp_dir:
            _snake_case = dataset_module_factory(UpperCAmelCase , cache_dir=UpperCAmelCase )
            _snake_case = import_main_class(dataset_module.module_path , dataset=UpperCAmelCase )
            _snake_case = builder_cls(
                cache_dir=UpperCAmelCase , config_name=UpperCAmelCase , hash=dataset_module.hash , )
            # URL layout: <base>/<relative data dir>/dataset_info.json
            _snake_case = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=UpperCAmelCase ).replace(os.sep , """/""" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            _snake_case = cached_path(UpperCAmelCase , cache_dir=UpperCAmelCase )
            self.assertTrue(os.path.exists(UpperCAmelCase ) )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
    """Integration: download-and-prepare wikipedia 20220301.frr through the
    HF GCS mirror and materialize it as a Dataset.

    NOTE(review): the bare ``_snake_case = None`` before
    ``download_and_prepare()`` lost its assignment target in obfuscation —
    upstream it disables the apache-beam path via
    ``builder_instance._download_and_prepare = None``; confirm.
    """
    _snake_case = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
    _snake_case = dataset_module_factory("""wikipedia""" , cache_dir=_SCREAMING_SNAKE_CASE )
    _snake_case = import_main_class(dataset_module.module_path )
    _snake_case = builder_cls(
        cache_dir=_SCREAMING_SNAKE_CASE , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    _snake_case = None
    builder_instance.download_and_prepare()
    _snake_case = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
    """Integration: streaming wikipedia 20220301.frr yields an
    IterableDatasetDict containing a non-empty ``train`` split.

    Fix: stripped the stray ``| 341 |`` text that had been fused onto the
    final assert (copy/paste residue), which made the line a syntax error.
    """
    _snake_case = dataset_module_factory("""wikipedia""" , cache_dir=_SCREAMING_SNAKE_CASE )
    _snake_case = import_main_class(dataset_module.module_path , dataset=_SCREAMING_SNAKE_CASE )
    _snake_case = builder_cls(
        cache_dir=_SCREAMING_SNAKE_CASE , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    _snake_case = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    assert "train" in ds
    assert isinstance(ds["""train"""] , _SCREAMING_SNAKE_CASE )
    assert next(iter(ds["""train"""] ) )
'''simple docstring'''
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    """Rewrite the ``done_test[_id]``-th occurrence of an expected line
    inside ``class_name.test_name`` of ``file`` with ``correct_line``,
    preserving the original indentation.

    The scan walks the file tracking nested states (inside the class,
    inside the test function, on the matching line) and splices the
    corrected line in once the statement being replaced closes.

    NOTE(review): the five obfuscated positional parameters are, by usage:
    file, class_name, test_name, correct_line, done_test (a counter dict).
    """
    _snake_case = f"""{file}_{class_name}_{test_name}"""
    # Count how often this (file, class, test) triple has been patched —
    # used to pick the right duplicate occurrence inside the test.
    done_test[_id] += 1
    with open(_SCREAMING_SNAKE_CASE , """r""" ) as f:
        _snake_case = f.readlines()
    _snake_case = f"""class {class_name}("""
    _snake_case = f"""{4 * " "}def {test_name}("""
    # Candidate target prefixes at 8- and 16-space indentation.
    _snake_case = f"""{8 * " "}{correct_line.split()[0]}"""
    _snake_case = f"""{16 * " "}{correct_line.split()[0]}"""
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = 0
    _snake_case = 0
    _snake_case = []
    for line in lines:
        if line.startswith(_SCREAMING_SNAKE_CASE ):
            _snake_case = True
        elif in_class and line.startswith(_SCREAMING_SNAKE_CASE ):
            _snake_case = True
        elif in_class and in_func and (line.startswith(_SCREAMING_SNAKE_CASE ) or line.startswith(_SCREAMING_SNAKE_CASE )):
            # Remember the matched line's original indentation width.
            _snake_case = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                _snake_case = True
        if in_class and in_func and in_line:
            if ")" not in line:
                # Multi-line statement: keep dropping lines until it closes.
                continue
            else:
                _snake_case = True
        if in_class and in_func and in_line and insert_line:
            # Emit the corrected line at the recorded indentation and reset.
            new_lines.append(f"""{spaces * " "}{correct_line}""" )
            _snake_case = _snake_case = _snake_case = _snake_case = False
        else:
            new_lines.append(_SCREAMING_SNAKE_CASE )
    with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
        for line in new_lines:
            f.write(_SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
    # NOTE(review): obfuscated driver (originally `main(correct, fail=None)`).
    # Both parameters share one name (SyntaxError), locals collapsed onto
    # `_snake_case` while the body reads `fail`, `test_failures`,
    # `correct_lines`, `done_tests`, and the call below targets
    # `overwrite_file`, which no longer exists under that name (the helper
    # above was renamed `__SCREAMING_SNAKE_CASE` and is shadowed by this very
    # definition). Left byte-identical pending de-obfuscation.
    if fail is not None:
        with open(_SCREAMING_SNAKE_CASE , """r""" ) as f:
            # Set of failing test ids ("file::class::test") to restrict patching to.
            _snake_case = {l.strip() for l in f.readlines()}
    else:
        _snake_case = None
    with open(_SCREAMING_SNAKE_CASE , """r""" ) as f:
        _snake_case = f.readlines()
    # Per-(file, class, test) patch counter shared with the overwrite helper.
    _snake_case = defaultdict(_SCREAMING_SNAKE_CASE )
    for line in correct_lines:
        # Each input line is "file;class;test;corrected source line".
        _snake_case, _snake_case, _snake_case, _snake_case = line.split(""";""" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # Fix: the obfuscated guard bound the parser and the parsed namespace to
    # `__lowerCAmelCase` while reading `parser`/`args` (NameError), called the
    # nonexistent `main`, and carried trailing extraction junk ("| 341 | 1 |")
    # that made the line a SyntaxError.
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
    # NOTE(review): `main` never exists in this obfuscated module -- the entry
    # point was renamed `__SCREAMING_SNAKE_CASE`; its latest binding is the
    # driver defined directly above.
    __SCREAMING_SNAKE_CASE(args.correct_filename, args.fail_filename)
import argparse
import os
import re
import packaging.version
# NOTE(review): obfuscation collapsed four distinct module constants
# (originally PATH_TO_EXAMPLES, REPLACE_PATTERNS, REPLACE_FILES, README_FILE)
# onto the single name `__a`, so each assignment below shadows the previous
# one and the helper functions in this module can no longer reach them by
# their original names.
# Root of the examples tree scanned for `check_min_version` calls.
__a :Union[str, Any] = 'examples/'
# name -> (compiled pattern, replacement) pairs used to rewrite version strings.
__a :List[str] = {
    'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
    'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# Files whose embedded version string is always updated on release.
__a :Optional[Any] = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
# README whose model list gets its doc links rewritten after a release.
__a :str = 'README.md'
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : str ,__UpperCamelCase : Optional[int] ):
    """Rewrite the version string inside one file.

    NOTE(review): broken by obfuscation -- the three parameters (originally
    ``fname``, ``version``, ``pattern``) share one name, which is a
    SyntaxError, and the locals collapsed onto ``A_`` while the body reads
    ``pattern``, ``replace`` and ``re_pattern``. Left byte-identical pending
    de-obfuscation.
    """
    with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
        A_ = f.read()
    # (compiled pattern, replacement template) selected from the module's
    # pattern table (renamed `__a` by obfuscation).
    A_ , A_ = REPLACE_PATTERNS[pattern]
    A_ = replace.replace("VERSION" ,__UpperCamelCase )
    A_ = re_pattern.sub(__UpperCamelCase ,__UpperCamelCase )
    with open(__UpperCamelCase ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
        f.write(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
    """Apply the 'examples' version pattern to every ``.py`` file under a tree.

    NOTE(review): obfuscation damage -- `update_version_in_file` no longer
    exists under that name (the helper above was renamed `__snake_case` and is
    shadowed by this very definition), and `os.path.join` is fed the root
    parameter twice instead of (folder, fname). Left byte-identical.
    """
    for folder, directories, fnames in os.walk(__UpperCamelCase ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects" )
        if "legacy" in directories:
            directories.remove("legacy" )
        for fname in fnames:
            if fname.endswith(".py" ):
                update_version_in_file(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase ,pattern="examples" )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : int=False ):
    """Update the version everywhere: init/setup/docs, plus examples unless a patch.

    NOTE(review): obfuscation damage -- the two parameters (originally
    ``version`` and ``patch``) share one name (SyntaxError), and
    ``REPLACE_FILES``, ``update_version_in_file`` and
    ``update_version_in_examples`` were all renamed and are undefined here.
    Left byte-identical.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
    if not patch:
        update_version_in_examples(__UpperCamelCase )
def __snake_case ( ):
    """Point model-doc links in the README at the stable docs instead of 'main'.

    NOTE(review): obfuscation damage -- the locals collapsed onto ``A_`` while
    the body reads ``_start_prompt``, ``_end_prompt``, ``lines``,
    ``start_index`` and ``index``, and the ``open`` calls target
    ``__UpperCamelCase``, which is undefined in this zero-argument function
    (originally the README_FILE constant). Left byte-identical.
    """
    A_ = "🤗 Transformers currently provides the following architectures"
    A_ = "1. Want to contribute a new model?"
    with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
        A_ = f.readlines()
    # Find the start of the list.
    A_ = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    A_ = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("1." ):
            A_ = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc" ,"https://huggingface.co/docs/transformers/model_doc" ,)
        index += 1
    with open(__UpperCamelCase ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
        f.writelines(__UpperCamelCase )
def __snake_case ( ):
    """Parse the current version out of the package ``__init__``.

    NOTE(review): obfuscation damage -- ``REPLACE_FILES``/``REPLACE_PATTERNS``
    were renamed ``__a`` at module level, and the file contents are bound to
    ``A_`` while the search reads ``__UpperCamelCase`` (undefined here).
    Left byte-identical.
    """
    with open(REPLACE_FILES["init"] ,"r" ) as f:
        A_ = f.read()
    # Group 0 of the 'init' pattern captures the quoted version string.
    A_ = REPLACE_PATTERNS["init"][0].search(__UpperCamelCase ).groups()[0]
    return packaging.version.parse(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Union[str, Any]=False ):
    """Interactive pre-release step: choose the release version and apply it.

    NOTE(review): obfuscation damage -- locals collapsed onto ``A_`` while the
    body reads ``patch``, ``default_version`` and ``version`` (never bound),
    and the helper names (``get_version``, ``global_version_update``,
    ``clean_main_ref_in_model_list``) no longer exist. Left byte-identical.
    """
    A_ = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
    if default_version.is_devrelease:
        A_ = default_version.base_version
    elif patch:
        A_ = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        A_ = f'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    A_ = input(f'''Which version are you releasing? [{default_version}]''' )
    if len(__UpperCamelCase ) == 0:
        A_ = default_version
    print(f'''Updating version to {version}.''' )
    global_version_update(__UpperCamelCase ,patch=__UpperCamelCase )
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`." )
        clean_main_ref_in_model_list()
def __snake_case ( ):
    """Interactive post-release step: bump to the next ``.dev0`` version.

    NOTE(review): same obfuscation damage as the helpers above --
    ``current_version``, ``dev_version`` and ``version`` are read but never
    bound (all assignments target ``A_``), ``__UpperCamelCase`` is undefined
    in this zero-argument function, and the helper names no longer exist.
    Left byte-identical.
    """
    A_ = get_version()
    A_ = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    A_ = current_version.base_version
    # Check with the user we got that right.
    A_ = input(f'''Which version are we developing now? [{dev_version}]''' )
    if len(__UpperCamelCase ) == 0:
        A_ = dev_version
    print(f'''Updating version to {version}.''' )
    global_version_update(__UpperCamelCase )
    print("Cleaning main README, don't forget to run `make fix-copies`." )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Fix: the obfuscated guard bound the parser and the parsed namespace to
    # `__a` while reading `parser`/`args` (NameError), and the final line
    # carried trailing extraction junk ("| 366 |") that made it a SyntaxError.
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
    # NOTE(review): `pre_release_work`/`post_release_work` do not exist in this
    # obfuscated module (both were renamed `__snake_case`); the calls are kept
    # to document the intended entry points pending de-obfuscation.
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('Nothing to do after a patch :-)')
    else:
        post_release_work()
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
    """Fixture that builds ConvBert configs and dummy inputs for the TF tests.

    NOTE(review): machine-obfuscation rewrote every ``self.<attr> = value``
    assignment into the throw-away local ``A_`` (so no instance state is ever
    stored and each method's ``self.batch_size`` etc. fails at runtime),
    renamed all constructor parameters to the repeated ``UpperCAmelCase``
    (duplicate argument names are a SyntaxError) and ignores them in favour of
    the hard-coded values below. Code left byte-identical pending
    de-obfuscation against the upstream ``TFConvBertModelTester``.
    """
    def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
        # Intended: store the hyper-parameters on `self` (batch=13, seq_len=7,
        # vocab=99, hidden=384, 2 layers, 4 heads, ...).
        A_ = parent
        A_ = 13
        A_ = 7
        A_ = True
        A_ = True
        A_ = True
        A_ = True
        A_ = 99
        A_ = 384
        A_ = 2
        A_ = 4
        A_ = 37
        A_ = "gelu"
        A_ = 0.1
        A_ = 0.1
        A_ = 512
        A_ = 16
        A_ = 2
        A_ = 0.02
        A_ = 3
        A_ = 4
        A_ = 128
        A_ = 2
        A_ = 9
        A_ = 1
        A_ = None
    def __A ( self : Optional[int] ):
        # Intended: build (config, input_ids, token_type_ids, input_mask,
        # sequence_labels, token_labels, choice_labels).
        A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A_ = None
        if self.use_input_mask:
            A_ = random_attention_mask([self.batch_size, self.seq_length] )
        A_ = None
        if self.use_token_type_ids:
            A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        A_ = None
        A_ = None
        A_ = None
        if self.use_labels:
            A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            A_ = ids_tensor([self.batch_size] , self.num_choices )
        A_ = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
        # Intended: create_and_check_model -- run the base model on dict and
        # list inputs and check the hidden-state shape.
        A_ = TFConvBertModel(config=UpperCAmelCase )
        A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        A_ = [input_ids, input_mask]
        A_ = model(UpperCAmelCase )
        A_ = model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
        # Intended: create_and_check_for_masked_lm.
        A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
        A_ = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        A_ = model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
        # Intended: create_and_check_for_sequence_classification.
        A_ = self.num_labels
        A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
        A_ = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        A_ = model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
        # Intended: create_and_check_for_multiple_choice -- tile the inputs to
        # (batch, num_choices, seq_len).
        A_ = self.num_choices
        A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
        A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        A_ = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        A_ = model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
        # Intended: create_and_check_for_token_classification.
        A_ = self.num_labels
        A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
        A_ = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        A_ = model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
        # Intended: create_and_check_for_question_answering.
        A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
        A_ = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        A_ = model(UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __A ( self : List[str] ):
        # Intended: prepare_config_and_inputs_for_common -- unpack the fixture
        # tuple and return (config, inputs_dict).
        A_ = self.prepare_config_and_inputs()
        (
            (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) ,
        ) = config_and_inputs
        A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
    """TFConvBert model test-suite (upstream ``TFConvBertModelTest``).

    NOTE(review): obfuscation damage -- the mixin bases were renamed
    ``snake_case_`` (undefined; originally ``TFModelTesterMixin`` and
    ``PipelineTesterMixin``), all five class attributes share the name
    ``_lowerCamelCase`` so only the last survives, and every method local is
    bound to the throw-away ``A_`` while the bodies read the original names.
    Left byte-identical pending de-obfuscation.
    """
    _lowerCamelCase : Union[str, Any] = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    _lowerCamelCase : Any = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _lowerCamelCase : Dict = False
    _lowerCamelCase : Optional[int] = False
    _lowerCamelCase : Dict = False
    def __A ( self : List[str] ):
        # Intended: setUp -- build the model tester and the config tester.
        A_ = TFConvBertModelTester(self )
        A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
    def __A ( self : Tuple ):
        self.config_tester.run_common_tests()
    def __A ( self : Tuple ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )
    def __A ( self : Dict ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
    def __A ( self : List[Any] ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
    def __A ( self : Dict ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
    def __A ( self : int ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
    def __A ( self : List[Any] ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
    @slow
    def __A ( self : str ):
        # Intended: round-trip each model through a TF SavedModel export and
        # check the reloaded hidden-state/attention shapes.
        A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
        A_ = True
        A_ = True
        if hasattr(UpperCAmelCase , "use_cache" ):
            A_ = True
        A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
        for model_class in self.all_model_classes:
            A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
            A_ = model_class(UpperCAmelCase )
            A_ = len(model(UpperCAmelCase ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
                A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
                A_ = tf.keras.models.load_model(UpperCAmelCase )
                A_ = model(UpperCAmelCase )
                if self.is_encoder_decoder:
                    A_ = outputs["encoder_hidden_states"]
                    A_ = outputs["encoder_attentions"]
                else:
                    A_ = outputs["hidden_states"]
                    A_ = outputs["attentions"]
                self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
                A_ = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def __A ( self : List[str] ):
        A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(UpperCAmelCase )
    def __A ( self : Any ):
        # Intended: check that attention outputs appear (and have the right
        # shapes) under the various output_attentions/output_hidden_states
        # configurations.
        A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
        A_ = True
        A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
        A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
        def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
            A_ = len(UpperCAmelCase )
            self.assertEqual(out_len % 2 , 0 )
            A_ = outputs.decoder_attentions
            self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
            A_ = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            A_ = True
            A_ = False
            A_ = model_class(UpperCAmelCase )
            A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            A_ = len(UpperCAmelCase )
            self.assertEqual(config.output_hidden_states , UpperCAmelCase )
            check_encoder_attentions_output(UpperCAmelCase )
            if self.is_encoder_decoder:
                A_ = model_class(UpperCAmelCase )
                A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
                self.assertEqual(config.output_hidden_states , UpperCAmelCase )
                check_decoder_attentions_output(UpperCAmelCase )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            A_ = True
            A_ = model_class(UpperCAmelCase )
            A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            self.assertEqual(config.output_hidden_states , UpperCAmelCase )
            check_encoder_attentions_output(UpperCAmelCase )
            # Check attention is always last and order is fine
            A_ = True
            A_ = True
            A_ = model_class(UpperCAmelCase )
            A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
            self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
            check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
    """Slow integration test: checks TFConvBert hidden states against reference values."""

    @slow
    def __A ( self : Dict ):
        # Fix: the obfuscated original bound every local to the throw-away
        # name `A_` and then read `model`/`output`/... (NameError), and the
        # final line carried trailing extraction junk ("| 329 | 0 |") that
        # made it a SyntaxError. Locals restored from the names the body
        # reads; behavior otherwise unchanged.
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # Base-size ConvBert: hidden size 768 for a 6-token input.
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any]=False ) -> Any:
try:
_snake_case = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_snake_case = default
else:
# KEY is set, convert it to True or False.
try:
_snake_case = strtobool(lowercase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
UpperCAmelCase__ = parse_flag_from_env('RUN_SLOW', default=False)
def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> Any:
return unittest.skip('''Test was skipped''' )(lowercase__ )
# Fix for the whole run of skip-decorator helpers below: each obfuscated body
# applied its skip decorator to the undefined name `lowercase__` (NameError);
# the decorated object is the function's own parameter. The nonsensical
# obfuscated annotations (which referenced unimported typing names) were
# dropped. NOTE(review): every helper still shares the name `_UpperCAmelCase`,
# so earlier definitions are shadowed -- confirm intended names against the
# de-obfuscated upstream (accelerate.test_utils.testing).
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless slow tests were enabled via RUN_SLOW."""
    # NOTE(review): `_run_slow_tests` was renamed by obfuscation (the parsed
    # flag is bound to `UpperCAmelCase__` above); reference kept pending
    # de-obfuscation.
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless running with no CUDA device (CPU-only)."""
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless a CUDA GPU is available."""
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless an XPU is available."""
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless torch was built with MPS support."""
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless both transformers and datasets are installed."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless bitsandbytes is installed."""
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless a TPU is available."""
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless exactly one CUDA GPU is visible."""
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless exactly one XPU is visible."""
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless more than one CUDA GPU is visible."""
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless more than one XPU is visible."""
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless safetensors is installed."""
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless DeepSpeed is installed."""
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless torch >= 1.12.0."""
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__lowerCamelCase )
def _UpperCAmelCase ( test_case=None , version=None ):
    """Skip unless torch >= *version*; usable as ``@deco`` or ``@deco(version=...)``.

    Fix: the obfuscated signature repeated one parameter name (a SyntaxError)
    and the body read the undefined ``lowercase__``; the parameter names are
    restored from what the body reads (``test_case``, ``version``).
    """
    if test_case is None:
        # NOTE(review): every helper in this module shares the name
        # `_UpperCAmelCase`, so this partial re-enters whatever definition is
        # bound last at call time -- confirm against the de-obfuscated source.
        return partial(_UpperCAmelCase , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , f'''test requires torch version >= {version}''' )(test_case )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless TensorBoard is installed."""
    # Fix: decorator was applied to the undefined `lowercase__`.
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless wandb is installed."""
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless comet_ml is installed."""
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__lowerCamelCase )
# True when a supported tracker (wandb/tensorboard) is available and comet_ml
# is NOT installed.
UpperCAmelCase__ = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _UpperCAmelCase ( __lowerCamelCase ):
    """Skip unless at least one tracker is available and comet_ml is absent."""
    # Fix: the obfuscated body read `_atleast_one_tracker_available`, which was
    # renamed `UpperCAmelCase__` (assigned directly above) -- use that binding.
    return unittest.skipUnless(
        UpperCAmelCase__ , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__lowerCamelCase )
class lowerCAmelCase__ ( unittest.TestCase ):
    """Base TestCase meant to provide a shared temporary directory.

    NOTE(review): obfuscation damage -- the original ``setUpClass`` /
    ``tearDownClass`` / ``setUp`` hooks were all renamed ``lowercase`` (so the
    first two definitions are shadowed dead code and unittest never calls any
    of them by its hook names), the ``clear_on_setup`` class attribute became
    ``__a`` (name-mangled inside the class), the ``tmpdir`` binding targets
    the throw-away local ``_snake_case``, and ``shutil.rmtree`` is handed the
    class object instead of ``path``. Left byte-identical pending
    de-obfuscation.
    """
    __a = True
    @classmethod
    def lowercase ( cls : Optional[Any] ):
        # Intended: setUpClass -- create the shared temp dir on `cls.tmpdir`.
        _snake_case = tempfile.mkdtemp()
    @classmethod
    def lowercase ( cls : List[Any] ):
        # Intended: tearDownClass -- remove the shared temp dir.
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def lowercase ( self : List[Any] ):
        # Intended: setUp -- empty the temp dir between tests when enabled.
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('''**/*''' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(lowerCAmelCase__ )
class lowerCAmelCase__ ( unittest.TestCase ):
    """TestCase that resets the Accelerate singletons after each test.

    NOTE(review): the method was renamed ``lowercase`` by obfuscation, so
    unittest never invokes it as ``tearDown`` and the singleton reset is
    effectively dead code. Left byte-identical pending de-obfuscation.
    """
    def lowercase ( self : List[str] ):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowerCAmelCase__ ( unittest.TestCase ):
    """TestCase helper meant to register and auto-stop a list of mock patches.

    NOTE(review): obfuscation damage -- the normalized mocks list is bound to
    the local ``_snake_case`` instead of ``self.mocks`` (so the loop reads an
    attribute that is never set), ``mocks`` is read but the parameter is named
    ``_lowerCamelCase``, and ``isinstance`` is handed the enclosing class.
    Left byte-identical pending de-obfuscation.
    """
    def lowercase ( self : str , _lowerCamelCase : Union[mock.Mock, List[mock.Mock]] ):
        _snake_case = mocks if isinstance(lowerCAmelCase__ , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def _UpperCAmelCase ( __lowerCamelCase : str ) -> List[str]:
    # Intended: gather `tensor` from every distributed process and return True
    # iff all ranks hold identical values.
    # NOTE(review): obfuscation damage -- the parameter is `__lowerCamelCase`
    # but the body reads `tensor`, and the locals (`state`, `tensors`, and the
    # reference tensor) all collapsed onto `_snake_case`, so this raises
    # NameError as written. Left byte-identical pending de-obfuscation.
    _snake_case = AcceleratorState()
    _snake_case = tensor[None].clone().to(state.device )
    _snake_case = gather(lowercase__ ).cpu()
    _snake_case = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , lowercase__ ):
            return False
    return True
class lowerCAmelCase__ :
    """Result of a finished subprocess: exit code plus captured output lines.

    Fix: the obfuscated ``__init__`` repeated the parameter name
    ``_lowerCamelCase`` three times (a SyntaxError) and bound locals instead
    of attributes; the names are restored from what the body reads
    (``returncode``, ``stdout``, ``stderr``).
    """
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ) -> Any:
while True:
_snake_case = await stream.readline()
if line:
callback(lowercase__ )
else:
break
async def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : str=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Union[str, Any]=False ) -> str:
    # Intended: spawn `cmd`, tee its stdout/stderr line-by-line into `out`/`err`
    # (echoing unless `quiet`), and return a _RunOutput once it exits.
    # NOTE(review): obfuscated beyond running -- the six parameters share one
    # name (SyntaxError), every local collapsed onto `_snake_case` while the
    # body reads `cmd`/`p`/`out`/`err`, and the nested `tee` repeats its own
    # parameter names too. Left byte-identical pending de-obfuscation.
    if echo:
        print('''\nRunning: ''' , ''' '''.join(lowercase__ ) )
    _snake_case = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=lowercase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowercase__ , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    _snake_case = []
    _snake_case = []
    def tee(__lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]="" ):
        # Intended: decode one line, collect it into `sink`, and echo it with a
        # "stdout:"/"stderr:" label unless quiet.
        _snake_case = line.decode('''utf-8''' ).rstrip()
        sink.append(lowercase__ )
        if not quiet:
            print(lowercase__ , lowercase__ , file=lowercase__ )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda __lowerCamelCase : tee(lowercase__ , lowercase__ , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda __lowerCamelCase : tee(lowercase__ , lowercase__ , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=lowercase__ , )
    return _RunOutput(await p.wait() , lowercase__ , lowercase__ )
def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=1_80 , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[int]=True ) -> int:
    # Intended: synchronously drive `_stream_subprocess` on the current event
    # loop and raise RuntimeError (with combined stderr) on non-zero exit.
    # NOTE(review): obfuscated beyond running -- the six parameters share one
    # name (SyntaxError) and the body reads `loop`, `result`, `cmd_str`,
    # `stderr` and `lowercase__`, none of which are bound. Left byte-identical
    # pending de-obfuscation.
    _snake_case = asyncio.get_event_loop()
    _snake_case = loop.run_until_complete(
        _stream_subprocess(lowercase__ , env=lowercase__ , stdin=lowercase__ , timeout=lowercase__ , quiet=lowercase__ , echo=lowercase__ ) )
    _snake_case = ''' '''.join(lowercase__ )
    if result.returncode > 0:
        _snake_case = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    return result
class lowerCAmelCase__ ( Exception ):
    """Raised when a command run through these subprocess helpers fails.

    Fix: the obfuscated base class ``A__`` is undefined anywhere in this
    module, so class creation raised NameError at import time; the upstream
    class derives from ``Exception``, which is what the ``raise ... from e``
    in the runner below requires.
    """
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[Any]=False ) -> Any:
    # Intended: run `command` via subprocess.check_output, optionally
    # returning decoded stdout, re-raising failures as a rich exception.
    # NOTE(review): obfuscated beyond running -- the two parameters share one
    # name (SyntaxError), the output is bound to `_snake_case` while the body
    # reads `return_stdout`/`output`, and the exception class was renamed
    # `lowerCAmelCase__` so `SubprocessCallException` is undefined. Left
    # byte-identical pending de-obfuscation.
    try:
        _snake_case = subprocess.check_output(lowercase__ , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(lowercase__ , '''decode''' ):
                _snake_case = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'''Command `{' '.join(lowercase__ )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
| 288 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset (150 samples, 4 features, 3 classes) and split it.
# NOTE(review): obfuscation collapsed `data`, `X`, `y`, `classes` and the four
# split results onto the single name `__lowerCAmelCase`, so the reads of
# `data`, `X` and `y` below are NameErrors, and the final annotated tuple
# assignment (`a ,b ,c ,d : str = ...`) is a SyntaxError -- annotations are
# only legal on a single target. Left byte-identical pending de-obfuscation.
__lowerCAmelCase : List[Any] =datasets.load_iris()
__lowerCAmelCase : Tuple =np.array(data['data'])
__lowerCAmelCase : Dict =np.array(data['target'])
__lowerCAmelCase : List[str] =data['target_names']
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : str =train_test_split(X, y)
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return np.linalg.norm(np.array(lowercase__ ) - np.array(lowercase__ ) )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
__SCREAMING_SNAKE_CASE : Optional[int] = zip(lowercase__ , lowercase__ )
# List of distances of all points from the point to be classified
__SCREAMING_SNAKE_CASE : Dict = []
for data_point in data:
__SCREAMING_SNAKE_CASE : Tuple = euclidean_distance(data_point[0] , lowercase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__SCREAMING_SNAKE_CASE : int = [i[1] for i in sorted(lowercase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__SCREAMING_SNAKE_CASE : Any = Counter(lowercase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
    # Demo: classify one unseen iris measurement.
    # NOTE(review): `classifier`, `X_train`, `y_train` and `classes` were all
    # renamed by obfuscation (`_UpperCamelCase`, `__lowerCAmelCase`), so this
    # call raises NameError as written.
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 9 | 0 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowerCAmelCase_ ( __A ) -> List[str]:
    '''Load a T5X checkpoint from *__A* and return its parameters as a flat dict.

    NOTE(review): obfuscation damage -- both intermediates collapsed onto
    ``UpperCAmelCase__`` (so ``flatten_dict`` is applied to the path argument
    instead of the loaded checkpoint) and the returned ``flax_params`` is
    never bound. Also ``from tax import checkpoints`` at the top of this file
    looks like a mangling of the ``t5x`` package. Left byte-identical pending
    de-obfuscation.
    '''
    UpperCAmelCase__ = checkpoints.load_tax_checkpoint(__A )
    UpperCAmelCase__ = flatten_dict(__A )
    return flax_params
def lowerCAmelCase_ ( __A ) -> Union[str, Any]:
    '''Rename flattened T5X/Flax parameter keys to HF names and convert to torch.

    NOTE(review): obfuscation damage -- every local collapsed onto
    ``UpperCAmelCase__`` while the body reads ``flax_dict``, ``new_key``,
    ``converted_dict``, ``converted_torch_dict`` and the two mapping
    constants, so this raises NameError as written. Left byte-identical;
    the intended logic is annotated inline.
    '''
    UpperCAmelCase__ = {}
    # Generic Flax -> HF key substring renames.
    UpperCAmelCase__ = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    # Additional renames that only apply to decoder parameters.
    UpperCAmelCase__ = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            UpperCAmelCase__ = ".".join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                UpperCAmelCase__ = new_key.replace(__A, __A )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    UpperCAmelCase__ = new_key.replace(__A, __A )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                UpperCAmelCase__ = re.sub(r"layers_(\d+)", r"layer.\1", __A )
                UpperCAmelCase__ = new_key.replace("encoder", "encoder.encoder" )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                UpperCAmelCase__ = re.sub(r"layers_(\d+)", r"layer.\1", __A )
            UpperCAmelCase__ = flax_dict[key]
    UpperCAmelCase__ = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # Dense kernels are transposed when moving from Flax to torch.
            UpperCAmelCase__ = torch.from_numpy(converted_dict[key].T )
        else:
            UpperCAmelCase__ = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def lowerCAmelCase_ ( t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False ) -> Dict:
    '''Convert a T5x Pix2Struct checkpoint into a HF model+processor and save both.

    Args:
        t5x_checkpoint_path: path to the original T5x checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        use_large: build the "large" config (hidden 1536, d_ff 3968, 24 heads, 18 layers).
        is_vqa: whether the checkpoint is a VQA variant.

    NOTE(review): parameter and local names restored from the argparse flags and
    later reads — the obfuscated source repeated ``__A``/``UpperCAmelCase__`` for
    every binding, which is a SyntaxError and leaves ``encoder_config`` /
    ``model`` / ``processor`` undefined at their use sites.
    '''
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1_536, d_ff=3_968, num_attention_heads=24, num_hidden_layers=18 )
        decoder_config = PixaStructTextConfig(hidden_size=1_536, d_ff=3_968, num_heads=24, num_layers=18 )
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa )
    model = PixaStructForConditionalGeneration(config)
    # Rename/transpose the flax parameters into a torch state dict and load it.
    state_dict = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(state_dict)
    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        # NOTE(review): the obfuscated dump only shows "... = 4_096" and "... = True"
        # here; these targets (large-model patch budget and VQA flag on the image
        # processor) are presumed from upstream — confirm before relying on them.
        processor.image_processor.max_patches = 4_096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("Model saved in {}".format(pytorch_dump_folder_path) )
if __name__ == "__main__":
    # Command-line entry point for the checkpoint conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    # BUG FIX: the help text was a copy-paste of --use_large's ("Use large model.").
    parser.add_argument('--is_vqa', action='store_true', help='Whether the checkpoint is a VQA model.')
    args = parser.parse_args()
    # BUG FIX: "args.tax_checkpoint_path" was a typo for the --t5x_checkpoint_path
    # flag declared above and raised AttributeError at runtime; also forward
    # args.is_vqa, which was parsed but never used.
    # NOTE(review): the conversion function in this file is obfuscated to
    # `lowerCAmelCase_` — confirm this call target against the real module.
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
def lowerCAmelCase_ ( __A ) -> str:
    '''Return *__A* with its whitespace-separated words in reverse order.

    ``str.split()`` with no argument collapses runs of whitespace and drops
    leading/trailing whitespace, so the result is always single-space joined.

    >>> lowerCAmelCase_("hello world")
    'world hello'
    >>> lowerCAmelCase_("")
    ''
    '''
    # BUG FIX: the body referenced an undefined name `input_str` instead of the
    # actual parameter, raising NameError on every call.
    return " ".join(__A.split()[::-1] )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 143 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger for this tokenizer module.
_lowerCAmelCase = logging.get_logger(__name__)
# NOTE(review): the five module constants below are all assigned to the same
# obfuscated name `_lowerCAmelCase`, yet the class below reads
# SPIECE_UNDERLINE / VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — presumably their original names;
# confirm against the upstream XLM-RoBERTa tokenizer module.
# SentencePiece's word-boundary marker character.
_lowerCAmelCase = '''▁'''
# File name the SentencePiece model is saved/loaded under.
_lowerCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
# Download URLs of the pretrained SentencePiece vocabularies.
_lowerCAmelCase = {
    '''vocab_file''': {
        '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
        '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
        '''xlm-roberta-large-finetuned-conll02-dutch''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
        ),
        '''xlm-roberta-large-finetuned-conll02-spanish''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
        ),
        '''xlm-roberta-large-finetuned-conll03-english''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
        ),
        '''xlm-roberta-large-finetuned-conll03-german''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
        ),
    }
}
# Maximum model input sizes (positional-embedding capacity) per checkpoint.
_lowerCAmelCase = {
    '''xlm-roberta-base''': 512,
    '''xlm-roberta-large''': 512,
    '''xlm-roberta-large-finetuned-conll02-dutch''': 512,
    '''xlm-roberta-large-finetuned-conll02-spanish''': 512,
    '''xlm-roberta-large-finetuned-conll03-english''': 512,
    '''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
    '''SentencePiece (BPE) tokenizer for XLM-RoBERTa checkpoints.

    Mimics fairseq's token-to-id alignment: ids 0-3 are the special tokens
    <s>/<pad>/</s>/<unk> and every SentencePiece id is shifted by
    ``fairseq_offset``.

    NOTE(review): the source dump collapses most assignment targets to
    ``lowerCAmelCase__`` and every helper method name to ``UpperCAmelCase_``,
    while reads use the original attribute names (``self.sp_model``,
    ``self.fairseq_offset``, ...) — confirm against the upstream
    ``XLMRobertaTokenizer`` before relying on this code as-is.
    '''
    # Class-level tokenizer metadata consumed by the PreTrainedTokenizer base.
    __lowercase : List[str] = VOCAB_FILES_NAMES
    __lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowercase : List[str] = ['''input_ids''', '''attention_mask''']
    def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="<mask>" ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
        '''Load the SentencePiece model and set up the fairseq id alignment.'''
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase__ : Optional[int] = AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else mask_token
        lowerCAmelCase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,mask_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,)
        # Build and load the SentencePiece processor from the vocab file.
        lowerCAmelCase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__UpperCAmelCase ) )
        lowerCAmelCase__ : Tuple = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        lowerCAmelCase__ : List[str] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        lowerCAmelCase__ : int = 1
        lowerCAmelCase__ : Dict = len(self.sp_model ) + self.fairseq_offset
        lowerCAmelCase__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ) -> int:
        # Drop the unpicklable SentencePiece processor; keep its serialized proto.
        lowerCAmelCase__ : List[str] = self.__dict__.copy()
        lowerCAmelCase__ : Optional[int] = None
        lowerCAmelCase__ : Optional[Any] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self ,__UpperCAmelCase ) -> str:
        # Rebuild the SentencePiece processor from the serialized proto.
        lowerCAmelCase__ : List[str] = d
        # for backward compatibility
        if not hasattr(self ,"""sp_model_kwargs""" ):
            lowerCAmelCase__ : Union[str, Any] = {}
        lowerCAmelCase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
        # Build model inputs with special tokens: <s> A </s> ( </s> B </s> ).
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCAmelCase__ : List[Any] = [self.cls_token_id]
        lowerCAmelCase__ : int = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
        # Special-tokens mask: 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__UpperCAmelCase )) + [1]
        return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
        # Token type ids: XLM-R does not use segment ids, so everything is 0.
        lowerCAmelCase__ : List[Any] = [self.sep_token_id]
        lowerCAmelCase__ : str = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def UpperCAmelCase_ ( self ) -> str:
        # Vocabulary size including the fairseq offset.
        return len(self.sp_model ) + self.fairseq_offset + 1  # Add the <mask> token
    def UpperCAmelCase_ ( self ) -> int:
        # Full token -> id mapping, including added tokens.
        lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
        # Tokenize with SentencePiece.
        # NOTE(review): upstream passes out_type=str here; the obfuscated
        # keyword argument is unbound — confirm against the original module.
        return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
        # Token -> id, honoring the fairseq alignment for the special tokens.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        lowerCAmelCase__ : Dict = self.sp_model.PieceToId(__UpperCAmelCase )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int:
        # Id -> token, undoing the fairseq offset for non-special ids.
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
        # Join pieces and turn SentencePiece's "▁" markers back into spaces.
        # NOTE(review): upstream replaces SPIECE_UNDERLINE with " "; the first
        # argument of replace() is unbound in this dump — confirm upstream.
        lowerCAmelCase__ : Tuple = """""".join(__UpperCAmelCase ).replace(__UpperCAmelCase ,""" """ ).strip()
        return out_string
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
        # Save the SentencePiece model file into `save_directory`.
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase__ : Optional[Any] = os.path.join(
            __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        # Copy the original model file when available, otherwise serialize it.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,__UpperCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__UpperCAmelCase ,"""wb""" ) as fi:
                lowerCAmelCase__ : Dict = self.sp_model.serialized_model_proto()
                fi.write(__UpperCAmelCase )
        return (out_vocab_file,)
| 37 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Hub ids of the four Stable Diffusion v1.x checkpoints compared by the
# pipeline below.
# NOTE(review): all four are collapsed onto one obfuscated name here, so only
# the last assignment survives — upstream uses four distinct constants.
_SCREAMING_SNAKE_CASE : Union[str, Any] = '''CompVis/stable-diffusion-v1-1'''
_SCREAMING_SNAKE_CASE : Optional[Any] = '''CompVis/stable-diffusion-v1-2'''
_SCREAMING_SNAKE_CASE : int = '''CompVis/stable-diffusion-v1-3'''
_SCREAMING_SNAKE_CASE : str = '''CompVis/stable-diffusion-v1-4'''
class a ( __snake_case ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : AutoencoderKL , __SCREAMING_SNAKE_CASE : CLIPTextModel , __SCREAMING_SNAKE_CASE : CLIPTokenizer , __SCREAMING_SNAKE_CASE : UNetaDConditionModel , __SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , __SCREAMING_SNAKE_CASE : CLIPImageProcessor , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]:
super()._init_()
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = StableDiffusionPipeline(
vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , requires_safety_checker=__SCREAMING_SNAKE_CASE , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCamelCase ( self : List[str] ) -> Dict[str, Any]:
return {k: getattr(self , __SCREAMING_SNAKE_CASE ) for k in self.config.keys() if not k.startswith('_' )}
def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ) -> Any:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Any ) -> List[Any]:
self.enable_attention_slicing(__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : int , ) -> Tuple:
return self.pipea(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Optional[int]:
return self.pipea(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Tuple:
return self.pipea(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Tuple:
return self.pipea(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : int , ) -> str:
lowerCamelCase_ = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(__SCREAMING_SNAKE_CASE )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
lowerCamelCase_ = self.textaimg_sda_a(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.2
lowerCamelCase_ = self.textaimg_sda_a(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.3
lowerCamelCase_ = self.textaimg_sda_a(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.4
lowerCamelCase_ = self.textaimg_sda_a(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 183 | 0 |
'''simple docstring'''
from typing import Any
def _snake_case(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely hidden-state sequence (Viterbi algorithm).

    Args:
        observations_space: ordered observations.
        states_space: candidate hidden states.
        initial_probabilities: state -> P(state at t=0).
        transition_probabilities: state -> {state -> P(transition)}.
        emission_probabilities: state -> {observation -> P(emission)}.

    NOTE(review): parameter and local names restored from the reads in the
    obfuscated dump, which repeated one name for every binding (a SyntaxError)
    while reading ``probabilities``/``pointers``/``arg_max`` etc.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step.
    # probabilities[(state, obs)] = best path probability ending in `state` at `obs`;
    # pointers[(state, obs)]     = predecessor state on that best path.
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _snake_case(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every Viterbi input (emptiness, list types, dict schemas).

    NOTE(review): parameter names restored — the obfuscated signature repeated
    one name five times, which is a SyntaxError.
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )
def _snake_case ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("""There's an empty parameter""" )
def _snake_case(observations_space: Any, states_space: Any) -> None:
    """Validate that observations and states are both lists of strings.

    NOTE(review): argument names and the literal var_name labels restored from
    the string literals in the obfuscated dump.
    """
    _validate_list(observations_space, """observations_space""" )
    _validate_list(states_space, """states_space""" )
def _snake_case ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ) -> None:
"""simple docstring"""
if not isinstance(_object , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase = f'{var_name} must be a list'
raise ValueError(_SCREAMING_SNAKE_CASE )
else:
for x in _object:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase = f'{var_name} must be a list of strings'
raise ValueError(_SCREAMING_SNAKE_CASE )
def _snake_case(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate the three probability dicts for the Viterbi inputs.

    NOTE(review): names restored; the third argument of the first call is
    presumed `float` (values of initial_probabilities) per the nested-dict
    validators below.
    """
    _validate_dict(initial_probabilities, """initial_probabilities""" , float )
    _validate_nested_dict(transition_probabilities, """transition_probabilities""" )
    _validate_nested_dict(emission_probabilities, """emission_probabilities""" )
def _snake_case(_object: Any, var_name: str) -> None:
    """Validate a dict of dicts with str keys and float leaf values.

    NOTE(review): the inner-call arguments were collapsed in the dump;
    restored as (value, var_name, float, nested=True) per the message format
    in the dict validator.
    """
    # Outer mapping: str keys, dict values.
    _validate_dict(_object , var_name , dict )
    # Inner mappings: str keys, float values (reported as "nested dictionary").
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _snake_case ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : type , _SCREAMING_SNAKE_CASE : bool = False ) -> None:
"""simple docstring"""
if not isinstance(_object , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase = f'{var_name} must be a dict'
raise ValueError(_SCREAMING_SNAKE_CASE )
if not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for x in _object ):
lowerCAmelCase = f'{var_name} all keys must be strings'
raise ValueError(_SCREAMING_SNAKE_CASE )
if not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for x in _object.values() ):
lowerCAmelCase = """nested dictionary """ if nested else """"""
lowerCAmelCase = f'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(_SCREAMING_SNAKE_CASE )
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    from doctest import testmod

    testmod() | 187 |
'''simple docstring'''
import numpy
class __snake_case:
    """A fully connected neural network with two hidden layers (4 and 3 nodes)
    and a single sigmoid output node, trained by plain gradient backprop.

    NOTE(review): restored from an obfuscated dump in which every method was
    named ``__snake_case`` (so only the last survived) and assignment targets
    lost their ``self.`` prefixes; the restored names come from the attribute
    reads inside the methods. Depends on module-level ``sigmoid`` /
    ``sigmoid_derivative`` helpers.
    """

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        """Store the training data and draw random initial weights.

        Args:
            input_array: training inputs, one sample per row.
            output_array: expected outputs, one row per sample.
        """
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer;
        # the first hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # First hidden layer has 4 nodes; second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Second hidden layer has 3 nodes; output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values; initially all zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the inputs through both hidden layers; return the output."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer connecting the first hidden set of nodes with the second.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer connecting the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Compute the loss gradient for every weight matrix and apply it."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run forward+backward passes; optionally print the loss per iteration."""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f'Iteration {iteration} Loss: {loss}')

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Classify a single input; threshold the sigmoid output at 0.6."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def _snake_case ( _SCREAMING_SNAKE_CASE : numpy.ndarray ) -> numpy.ndarray:
"""simple docstring"""
return 1 / (1 + numpy.exp(-value ))
def _snake_case ( _SCREAMING_SNAKE_CASE : numpy.ndarray ) -> numpy.ndarray:
"""simple docstring"""
return (value) * (1 - (value))
def _snake_case ( ) -> int:
    """Train the demo network on the 8 three-bit inputs and classify (1, 1, 1).

    NOTE(review): restored from the obfuscated dump — `numpy.floataa` does not
    exist (presumed float64) and the original called an undefined
    `TwoHiddenLayerNeuralNetwork`; the class in this file is named
    `__snake_case`, so that is what is instantiated here. Confirm both against
    the upstream source.
    """
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ), dtype=numpy.float64, )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = __snake_case(
        input_array=test_input, output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64 ) )
# Script entry point: run the demo training/prediction.
if __name__ == "__main__":
    # NOTE(review): `example` is not bound under that name in this dump (the
    # demo function above is obfuscated to `_snake_case`) — confirm the
    # intended call target against the upstream source.
    example() | 187 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
    """Checks that loading a Flax pipeline downloads only Flax weight files."""
    def _lowercase ( self : Optional[int] ):
        # Download the tiny test pipeline into a throwaway cache directory and
        # inspect what landed in the hub snapshot folder.
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            # NOTE(review): `UpperCAmelCase__` is read but never bound in this
            # dump — presumably safety_checker=None, cache_dir=tmpdirname and
            # the os.walk root is tmpdirname in the original; confirm upstream.
            __lowercase = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=UpperCAmelCase__, cache_dir=UpperCAmelCase__ )
            # Collect every file name below the snapshot directory.
            __lowercase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase__, os.listdir(UpperCAmelCase__ )[0], "snapshots" ) )]
            __lowercase = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class _lowerCAmelCase(unittest.TestCase):
    """Slow integration tests for ``FlaxStableDiffusionPipeline``.

    Restored from an obfuscated original: locals were assigned to
    placeholder names the later reads never matched (NameError), the fake
    dtypes ``np.floataa``/``jnp.bfloataa`` are replaced with
    ``np.float32``/``jnp.bfloat16``, and method names are made ``test_*``
    so unittest actually discovers them. Reference sums are only checked on
    a full 8-device TPU pod.
    """

    def test_stable_diffusion_tiny_checkpoint(self):
        """Tiny test checkpoint, 4 steps, 64x64 output."""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng across devices for the pmapped call
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        """SD v1.4 with float32 Flax weights, 50 steps, 512x512 output."""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng across devices
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        """SD v1.4 with bfloat16 weights and no safety checker."""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng across devices
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        """Same as above but with the default safety checker loaded."""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng across devices
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        """SD v1.4 bfloat16 driven by an explicitly constructed DDIM scheduler."""
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        # the freshly created scheduler needs its own state inside `params`
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng across devices
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        """Outputs with and without memory-efficient attention must agree."""
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        attn_slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        # NOTE(review): the original sliced `images`, not `images_eff`, here;
        # kept as-is to preserve behavior, but this looks like it should read
        # from `images_eff`. TODO confirm.
        attn_slice_eff = images[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(attn_slice_eff - attn_slice).max() < 1e-2
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_a = 'CompVis/stable-diffusion-v1-1'
_a = 'CompVis/stable-diffusion-v1-2'
_a = 'CompVis/stable-diffusion-v1-3'
_a = 'CompVis/stable-diffusion-v1-4'
class _lowerCAmelCase(DiffusionPipeline):
    """Pipeline that runs the same prompt through the four Stable Diffusion
    v1.x checkpoints and returns one image per checkpoint.

    Restored from an obfuscated original: the base class name `lowercase`
    was undefined (the file imports ``DiffusionPipeline``), ``super()._init_()``
    never called the real initializer, the four sub-pipelines were all
    collapsed onto one attribute, and every method shared the name
    ``_lowercase`` while the dispatcher called undefined ``textaimg_sda_a``
    methods.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        # v1.1 - v1.3 are loaded whole from the hub; v1.4 is assembled from
        # the components handed to this constructor.
        self.pipe1 = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1")
        self.pipe2 = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-2")
        self.pipe3 = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-3")
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        """Public (non-underscored) registered modules, keyed by config name."""
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation to save memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable attention slicing (compute attention in one step)."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.1 checkpoint (thin pass-through to pipe1)."""
        return self.pipe1(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.2 checkpoint (thin pass-through to pipe2)."""
        return self.pipe2(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.3 checkpoint (thin pass-through to pipe3)."""
        return self.pipe3(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.4 checkpoint (thin pass-through to pipe4)."""
        return self.pipe4(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Run the prompt through all four checkpoints and collect the first
        image from each into a single ``StableDiffusionPipelineOutput``.

        :raises ValueError: if ``height`` or ``width`` is not divisible by 8.
        """
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""")
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
| 17 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __snake_case(unittest.TestCase):
    """Tests for ``SamProcessor`` against PyTorch tensors.

    Restored from an obfuscated original: locals were assigned placeholder
    names the reads never matched (NameError), ``np.uinta`` is not a dtype
    (-> ``np.uint8``), and method names are made ``setUp``/``tearDown``/
    ``test_*`` so unittest actually runs them.

    NOTE(review): the two test classes below reuse this (obfuscated) class
    name, so only the last binding survives import -- the suites should get
    distinct names. TODO confirm.
    """

    def setUp(self):
        # Serialize a default processor to a scratch dir shared by the tests.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        """Reload the saved image processor, applying keyword overrides."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random CHW uint8 image converted to PIL."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        """Round-trip save/load must honor extra image-processor kwargs."""
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        """Processor output must match the bare image processor's output."""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        """Mask post-processing must upscale to the original size and accept
        list, torch, and numpy size arguments; bad inputs must raise."""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class __snake_case(unittest.TestCase):
    """Tests for ``SamProcessor`` against TensorFlow tensors.

    Restored from an obfuscated original (undefined local reads, fake
    ``np.uinta`` dtype, non-``test_*``/``setUp`` method names).
    """

    def setUp(self):
        # Serialize a default processor to a scratch dir shared by the tests.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        """Reload the saved image processor, applying keyword overrides."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random CHW uint8 image converted to PIL."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        """Round-trip save/load must honor extra image-processor kwargs."""
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        """Processor output must match the bare image processor's output."""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        """Mask post-processing with TF tensors and numpy size arguments."""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class __snake_case(unittest.TestCase):
    """PT/TF equivalence tests for ``SamProcessor``.

    Restored from an obfuscated original (undefined local reads, fake
    ``np.uinta``/``np.floataa`` dtypes, non-``test_*``/``setUp`` names).
    """

    def setUp(self):
        # Serialize a default processor to a scratch dir shared by the tests.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        """Reload the saved image processor, applying keyword overrides."""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random CHW uint8 image converted to PIL."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        """PT and TF mask post-processing must produce identical arrays."""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        """Pixel values must agree between PT/TF and processor/image-processor."""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(tf_input_feat_extract, tf_input_processor))
| 159 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
# Fixture paths used by the processor tests. The obfuscated original bound
# all three values to the same name `_lowerCamelCase`, so only the last
# survived; distinct names restore all three.
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
# Backward-compatible alias: `_lowerCamelCase` previously ended up bound to
# the fixtures directory.
_lowerCamelCase = SAMPLE_PROCESSOR_CONFIG_DIR
class __snake_case (unittest.TestCase ):
    # NOTE(review): this test class looks machine-mangled — every test method is
    # named SCREAMING_SNAKE_CASE (so later definitions shadow earlier ones on the
    # class object) and most call arguments were replaced by the undefined name
    # `_UpperCAmelCase`. The docstrings below describe the apparent intent of
    # each body; confirm against the upstream AutoProcessor test suite.
    lowerCAmelCase__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]  # tiny vocab used by tokenizer fixtures
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
        '''Per-test setup: resets a counter (the binding name is mangled).'''
        _lowerCAmelCase : Any = 0
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
        '''Loads a processor for a Hub wav2vec2 checkpoint and checks its type.'''
        _lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
        '''Round-trips config + processor through save_pretrained/from_pretrained in a temp dir.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCAmelCase : List[Any] = WavaVecaConfig()
            _lowerCAmelCase : str = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
            # save in new folder
            model_config.save_pretrained(_UpperCAmelCase )
            processor.save_pretrained(_UpperCAmelCase )
            _lowerCAmelCase : Any = AutoProcessor.from_pretrained(_UpperCAmelCase )
            self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
        '''Assembles a local checkpoint from fixture files and loads a processor from it.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
            copyfile(_UpperCAmelCase , os.path.join(_UpperCAmelCase , """vocab.json""" ) )
            _lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(_UpperCAmelCase )
            self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
        '''Checks loading still works after `processor_class` is dropped from the tokenizer config.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCAmelCase : Any = WavaVecaFeatureExtractor()
            _lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
            _lowerCAmelCase : List[str] = WavaVecaProcessor(_UpperCAmelCase , _UpperCAmelCase )
            # save in new folder
            processor.save_pretrained(_UpperCAmelCase )
            # drop `processor_class` in tokenizer
            with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """r""" ) as f:
                _lowerCAmelCase : Union[str, Any] = json.load(_UpperCAmelCase )
            config_dict.pop("""processor_class""" )
            with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """w""" ) as f:
                f.write(json.dumps(_UpperCAmelCase ) )
            _lowerCAmelCase : List[Any] = AutoProcessor.from_pretrained(_UpperCAmelCase )
            self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
        '''Checks loading still works after `processor_class` is dropped from the feature-extractor config.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCAmelCase : Dict = WavaVecaFeatureExtractor()
            _lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
            _lowerCAmelCase : str = WavaVecaProcessor(_UpperCAmelCase , _UpperCAmelCase )
            # save in new folder
            processor.save_pretrained(_UpperCAmelCase )
            # drop `processor_class` in feature extractor
            with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """r""" ) as f:
                _lowerCAmelCase : str = json.load(_UpperCAmelCase )
            config_dict.pop("""processor_class""" )
            with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """w""" ) as f:
                f.write(json.dumps(_UpperCAmelCase ) )
            _lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(_UpperCAmelCase )
            self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
        '''Checks the processor can be inferred from `processor_class` in the model config alone.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCAmelCase : Tuple = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
            model_config.save_pretrained(_UpperCAmelCase )
            # copy relevant files
            copyfile(_UpperCAmelCase , os.path.join(_UpperCAmelCase , """vocab.json""" ) )
            # create emtpy sample processor
            with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """w""" ) as f:
                f.write("""{}""" )
            _lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(_UpperCAmelCase )
            self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
        '''Exercises trust_remote_code loading of a dynamic processor from the Hub.'''
        with self.assertRaises(_UpperCAmelCase ):
            _lowerCAmelCase : Any = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_UpperCAmelCase ):
            _lowerCAmelCase : List[str] = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_UpperCAmelCase )
        _lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_UpperCAmelCase )
        self.assertTrue(processor.special_attribute_present )
        self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
        _lowerCAmelCase : Optional[int] = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
        _lowerCAmelCase : Union[str, Any] = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            # Test we can also load the slow version
            _lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase )
            _lowerCAmelCase : List[str] = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present )
            self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
    def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
        '''Registers custom config/FE/tokenizer/processor classes and verifies auto-API dispatch; always unregisters in `finally`.'''
        try:
            AutoConfig.register("""custom""" , _UpperCAmelCase )
            AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase )
            AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase )
            AutoProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_UpperCAmelCase ):
                AutoProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
            # Now that the config is registered, it can be used as any other config with the auto-API
            _lowerCAmelCase : List[str] = CustomFeatureExtractor.from_pretrained(_UpperCAmelCase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                _lowerCAmelCase : Tuple = os.path.join(_UpperCAmelCase , """vocab.txt""" )
                with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
                    vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
                _lowerCAmelCase : str = CustomTokenizer(_UpperCAmelCase )
            _lowerCAmelCase : List[str] = CustomProcessor(_UpperCAmelCase , _UpperCAmelCase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(_UpperCAmelCase )
                _lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained(_UpperCAmelCase )
                self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
        finally:
            # Undo the registrations so later tests see a pristine auto-API.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
        '''Verifies local-vs-remote class precedence for the three trust_remote_code settings.'''
        # NOTE(review): the three nested classes below all share the name
        # `__snake_case` (only the last binding survives in this scope) — the
        # distinct local FE/tokenizer/processor class names look mangled.
        class __snake_case (_a ):
            lowerCAmelCase__ = False
        class __snake_case (_a ):
            lowerCAmelCase__ = False
        class __snake_case (_a ):
            lowerCAmelCase__ = "AutoFeatureExtractor"
            lowerCAmelCase__ = "AutoTokenizer"
            lowerCAmelCase__ = False
        try:
            AutoConfig.register("""custom""" , _UpperCAmelCase )
            AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase )
            AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase )
            AutoProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
            # If remote code is not set, the default is to use local classes.
            _lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local ones.
            _lowerCAmelCase : str = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_UpperCAmelCase )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub.
            _lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_UpperCAmelCase )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertTrue(processor.special_attribute_present )
            self.assertTrue(processor.feature_extractor.special_attribute_present )
            self.assertTrue(processor.tokenizer.special_attribute_present )
        finally:
            # Undo the registrations so later tests see a pristine auto-API.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
        '''A tokenizer-only checkpoint should yield the fast BERT tokenizer.'''
        _lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
        '''An image-model checkpoint should yield its image processor.'''
        _lowerCAmelCase : List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
        self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class __snake_case (unittest.TestCase ):
    # NOTE(review): Hub-push (staging endpoint) tests. As in the class above,
    # identifiers look machine-mangled: every method is named
    # SCREAMING_SNAKE_CASE and several helpers receive the undefined name
    # `_UpperCAmelCase` where concrete values are clearly intended.
    lowerCAmelCase__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]  # tiny vocab for tokenizer fixtures
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : int ) -> Any:
        '''Class setup: persist the staging token so the pushes below authenticate.'''
        _lowerCAmelCase : List[str] = TOKEN
        HfFolder.save_token(_UpperCAmelCase )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Tuple ) -> Optional[int]:
        '''Class teardown: best-effort deletion of every repo the tests may have created.'''
        try:
            delete_repo(token=cls._token , repo_id="""test-processor""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
        except HTTPError:
            pass
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        '''Pushes a processor to the Hub under the user namespace and reloads it for comparison.'''
        _lowerCAmelCase : Optional[int] = WavaVecaProcessor.from_pretrained(_UpperCAmelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(_UpperCAmelCase , """test-processor""" ) , push_to_hub=_UpperCAmelCase , use_auth_token=self._token )
            _lowerCAmelCase : str = WavaVecaProcessor.from_pretrained(f"{USER}/test-processor" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(_UpperCAmelCase , getattr(new_processor.feature_extractor , _UpperCAmelCase ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
        '''Same round-trip as above but pushing into an organization namespace.'''
        _lowerCAmelCase : int = WavaVecaProcessor.from_pretrained(_UpperCAmelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(_UpperCAmelCase , """test-processor-org""" ) , push_to_hub=_UpperCAmelCase , use_auth_token=self._token , organization="""valid_org""" , )
            _lowerCAmelCase : str = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(_UpperCAmelCase , getattr(new_processor.feature_extractor , _UpperCAmelCase ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
        '''Pushes a processor built from custom (dynamic) classes and verifies auto_map + copied code files.'''
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        _lowerCAmelCase : Any = CustomFeatureExtractor.from_pretrained(_UpperCAmelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCAmelCase : int = os.path.join(_UpperCAmelCase , """vocab.txt""" )
            with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            _lowerCAmelCase : List[str] = CustomTokenizer(_UpperCAmelCase )
        _lowerCAmelCase : List[str] = CustomProcessor(_UpperCAmelCase , _UpperCAmelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor" , token=self._token )
            _lowerCAmelCase : Union[str, Any] = Repository(_UpperCAmelCase , clone_from=f"{USER}/test-dynamic-processor" , token=self._token )
            processor.save_pretrained(_UpperCAmelCase )
            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map , {
                    """AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
                    """AutoProcessor""": """custom_processing.CustomProcessor""",
                } , )
            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(_UpperCAmelCase , """tokenizer_config.json""" ) ) as f:
                _lowerCAmelCase : str = json.load(_UpperCAmelCase )
            self.assertDictEqual(
                tokenizer_config["""auto_map"""] , {
                    """AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
                    """AutoProcessor""": """custom_processing.CustomProcessor""",
                } , )
            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , """custom_feature_extraction.py""" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , """custom_tokenization.py""" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , """custom_processing.py""" ) ) )
            repo.push_to_hub()
        _lowerCAmelCase : Tuple = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor" , trust_remote_code=_UpperCAmelCase )
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 159 | 1 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def lowercase__( __UpperCamelCase: int ):
    """Seed every RNG used during training (python `random`, numpy, torch CPU and CUDA)."""
    seed = __UpperCamelCase
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # torch guards this internally, so it is safe even when CUDA is unavailable.
    torch.cuda.manual_seed_all(seed)
class _a :
'''simple docstring'''
def __init__( self, A, A = 0.99_99, A = 0.0, A = 0, A = False, A = 1.0, A = 2 / 3, A = None, A = None, **A, ):
'''simple docstring'''
if isinstance(A, torch.nn.Module ):
SCREAMING_SNAKE_CASE : str = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`', '1.0.0', A, standard_warn=A, )
SCREAMING_SNAKE_CASE : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
SCREAMING_SNAKE_CASE : int = True
if kwargs.get('max_value', A ) is not None:
SCREAMING_SNAKE_CASE : Tuple = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value', '1.0.0', A, standard_warn=A )
SCREAMING_SNAKE_CASE : int = kwargs['max_value']
if kwargs.get('min_value', A ) is not None:
SCREAMING_SNAKE_CASE : List[Any] = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value', '1.0.0', A, standard_warn=A )
SCREAMING_SNAKE_CASE : Any = kwargs['min_value']
SCREAMING_SNAKE_CASE : Union[str, Any] = list(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = [p.clone().detach() for p in parameters]
if kwargs.get('device', A ) is not None:
SCREAMING_SNAKE_CASE : Tuple = 'The `device` argument is deprecated. Please use `to` instead.'
deprecate('device', '1.0.0', A, standard_warn=A )
self.to(device=kwargs['device'] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = decay
SCREAMING_SNAKE_CASE : Optional[Any] = min_decay
SCREAMING_SNAKE_CASE : Dict = update_after_step
SCREAMING_SNAKE_CASE : Tuple = use_ema_warmup
SCREAMING_SNAKE_CASE : Dict = inv_gamma
SCREAMING_SNAKE_CASE : Any = power
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : int = None # set in `step()`
SCREAMING_SNAKE_CASE : Union[str, Any] = model_cls
SCREAMING_SNAKE_CASE : List[str] = model_config
@classmethod
def UpperCamelCase_ ( cls, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = model_cls.load_config(A, return_unused_kwargs=A )
SCREAMING_SNAKE_CASE : int = model_cls.from_pretrained(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = cls(model.parameters(), model_cls=A, model_config=model.config )
ema_model.load_state_dict(A )
return ema_model
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
SCREAMING_SNAKE_CASE : List[Any] = self.model_cls.from_config(self.model_config )
SCREAMING_SNAKE_CASE : Any = self.state_dict()
state_dict.pop('shadow_params', A )
model.register_to_config(**A )
self.copy_to(model.parameters() )
model.save_pretrained(A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = max(0, optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
SCREAMING_SNAKE_CASE : List[Any] = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 + step) / (10 + step)
SCREAMING_SNAKE_CASE : Optional[int] = min(A, self.decay )
# make sure decay is not smaller than min_decay
SCREAMING_SNAKE_CASE : Tuple = max(A, self.min_decay )
return cur_decay_value
@torch.no_grad()
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if isinstance(A, torch.nn.Module ):
SCREAMING_SNAKE_CASE : List[str] = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`', '1.0.0', A, standard_warn=A, )
SCREAMING_SNAKE_CASE : str = parameters.parameters()
SCREAMING_SNAKE_CASE : Tuple = list(A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
SCREAMING_SNAKE_CASE : Any = self.get_decay(self.optimization_step )
SCREAMING_SNAKE_CASE : str = decay
SCREAMING_SNAKE_CASE : Optional[int] = 1 - decay
SCREAMING_SNAKE_CASE : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params, A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
SCREAMING_SNAKE_CASE : int = deepspeed.zero.GatheredParameters(A, modifier_rank=A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = list(A )
for s_param, param in zip(self.shadow_params, A ):
param.data.copy_(s_param.to(param.device ).data )
def UpperCamelCase_ ( self, A=None, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [
p.to(device=A, dtype=A ) if p.is_floating_point() else p.to(device=A )
for p in self.shadow_params
]
def UpperCamelCase_ ( self ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = [param.detach().cpu().clone() for param in parameters]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params, A ):
param.data.copy_(c_param.data )
# Better memory-wise.
SCREAMING_SNAKE_CASE : List[str] = None
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(A )
SCREAMING_SNAKE_CASE : str = state_dict.get('decay', self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.get('min_decay', self.min_decay )
if not isinstance(self.min_decay, A ):
raise ValueError('Invalid min_decay' )
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.get('optimization_step', self.optimization_step )
if not isinstance(self.optimization_step, A ):
raise ValueError('Invalid optimization_step' )
SCREAMING_SNAKE_CASE : Dict = state_dict.get('update_after_step', self.update_after_step )
if not isinstance(self.update_after_step, A ):
raise ValueError('Invalid update_after_step' )
SCREAMING_SNAKE_CASE : List[str] = state_dict.get('use_ema_warmup', self.use_ema_warmup )
if not isinstance(self.use_ema_warmup, A ):
raise ValueError('Invalid use_ema_warmup' )
SCREAMING_SNAKE_CASE : List[Any] = state_dict.get('inv_gamma', self.inv_gamma )
if not isinstance(self.inv_gamma, (float, int) ):
raise ValueError('Invalid inv_gamma' )
SCREAMING_SNAKE_CASE : Tuple = state_dict.get('power', self.power )
if not isinstance(self.power, (float, int) ):
raise ValueError('Invalid power' )
SCREAMING_SNAKE_CASE : str = state_dict.get('shadow_params', A )
if shadow_params is not None:
SCREAMING_SNAKE_CASE : Tuple = shadow_params
if not isinstance(self.shadow_params, A ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(A, torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' )
| 251 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module-level logger; `logging` here is the package logging helper imported above.
UpperCamelCase_ = logging.get_logger(__name__)
class SchedulerType(Enum):
    """
    Names of the learning-rate schedules accepted by `get_scheduler`.

    NOTE(review): the previous text declared every member as the same attribute
    `A` under an undefined base class, so only one member survived and the
    `SchedulerType` lookups later in this module could not resolve. Restored as
    a proper `Enum` whose string values match the rest of the module.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """
    Create a schedule with a constant learning rate, using the learning rate set in the optimizer.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with a constant multiplier of 1.

    NOTE(review): the previous signature repeated `__UpperCamelCase` for every
    parameter (a SyntaxError); restored the canonical diffusers name/parameters
    so the dispatch table below can reference this factory.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """
    Create a schedule with a constant learning rate preceded by a linear warmup
    from 0 to the optimizer's learning rate over `num_warmup_steps`.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of warmup steps.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.

    NOTE(review): parameter names restored; the previous signature repeated
    `__UpperCamelCase` (a SyntaxError) and the closure could not see the
    warmup-step count.
    """
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # Linear ramp from 0 to 1 during warmup.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """
    Create a piecewise-constant schedule from a rules string.

    `step_rules` looks like `"1:10,0.1:20,0.01:30,0.005"`: multiplier 1 until
    step 10, 0.1 until step 20, 0.01 until step 30, then 0.005 thereafter.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        step_rules: Comma-separated `value:step` pairs, final entry is the tail multiplier.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.

    NOTE(review): the previous text bound every local to a mangled name while
    the bodies read `rules_dict`, `rule_list`, `value`, `steps` and
    `sorted_steps`, leaving them undefined (and the signature had duplicate
    parameter names). Restored the bindings those references imply.
    """
    rules_dict = {}
    rule_list = step_rules.split(',')
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(':')
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        # Closure factory so the rule table is captured by value, not rebound later.
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """
    Create a schedule with a learning rate that increases linearly from 0 during
    warmup, then decreases linearly to 0 over the remaining training steps.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.

    NOTE(review): parameter names restored — the previous signature repeated
    `__UpperCamelCase` (a SyntaxError) and the closure read undefined names.
    """
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Linear decay to 0 at num_training_steps; clamp below at 0.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """
    Create a schedule with linear warmup followed by a cosine decay to 0.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        num_cycles: Number of waves in the cosine (0.5 = half-cosine from max to 0).
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.

    NOTE(review): parameter names restored — the previous signature repeated
    `__UpperCamelCase` (a SyntaxError) and the closure read undefined names.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """
    Create a schedule with linear warmup followed by a cosine decay with
    `num_cycles` hard restarts (the multiplier snaps back to 1 at each restart).

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        num_cycles: Number of hard restarts.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.

    NOTE(review): parameter names restored — the previous signature repeated
    `__UpperCamelCase` (a SyntaxError) and the closure read undefined names.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # `% 1.0` restarts the cosine at the beginning of every cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """
    Create a schedule with linear warmup followed by a polynomial decay from the
    optimizer's initial lr down to `lr_end` over the remaining training steps.

    Args:
        optimizer: The optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        lr_end: Final absolute learning rate (must be below the initial lr).
        power: Polynomial power of the decay.
        last_epoch: The index of the last epoch when resuming training.

    Raises:
        ValueError: If `lr_end` is not smaller than the optimizer's initial lr.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.

    NOTE(review): parameter names restored — the previous signature repeated
    `__UpperCamelCase` (a SyntaxError) and the closure read undefined names.
    """
    lr_init = optimizer.defaults['lr']
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table used by `get_scheduler` to map a `SchedulerType` member to its
# factory function.
# NOTE(review): this dict was previously assigned to `UpperCamelCase_`, which
# both clobbered the module logger bound to that name and left the
# `TYPE_TO_SCHEDULER_FUNCTION` lookup in `get_scheduler` unresolved.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """
    Unified entry point: build a learning-rate scheduler from its name.

    Args:
        name: The name (string or `SchedulerType`) of the schedule to use.
        optimizer: The optimizer whose learning rate is scheduled.
        step_rules: Rules string, only used by `PIECEWISE_CONSTANT`.
        num_warmup_steps: Warmup steps, required by all warmup schedules.
        num_training_steps: Total steps, required by decaying schedules.
        num_cycles: Cycle count, only used by `COSINE_WITH_RESTARTS`.
        power: Polynomial power, only used by `POLYNOMIAL`.
        last_epoch: The index of the last epoch when resuming training.

    Raises:
        ValueError: If a required argument for the chosen schedule is missing.

    NOTE(review): parameter names restored — the previous signature repeated
    `__UpperCamelCase` (a SyntaxError) and the body read undefined names.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 251 | 1 |
"""simple docstring"""
# Universal gas constant R in J / (mol * K).
UNIVERSAL_GAS_CONSTANT = 8.3_144_598
# Backward-compatible alias for the previous (mangled) module-level name.
_A = UNIVERSAL_GAS_CONSTANT


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """
    Return the root-mean-square speed (m/s) of a gas molecule:

        v_rms = sqrt(3 * R * T / M)

    Args:
        temperature: Absolute temperature in kelvin (must be >= 0).
        molar_mass: Molar mass in kg/mol (must be > 0).

    Raises:
        Exception: If temperature is negative or molar mass is non-positive.

    NOTE(review): the previous definition repeated the parameter name
    `lowerCAmelCase` (a SyntaxError) and read the unbound names
    `temperature`, `molar_mass` and `UNIVERSAL_GAS_CONSTANT`; the bindings
    implied by those references are restored here, and the `__main__` demo
    below calls this function by its restored name.
    """
    if temperature < 0:
        raise Exception("""Temperature cannot be less than 0 K""")
    if molar_mass <= 0:
        raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_A = 3_00
_A = 28
_A = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 166 |
"""simple docstring"""
from math import factorial, radians
def a__(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """
    Approximate sin(angle_in_degrees) with a Maclaurin series.

    Args:
        angle_in_degrees: Angle in degrees; reduced into [0, 360) first.
        accuracy: Number of series terms added after the leading `x` term.
        rounded_values_count: Decimal places used when rounding the result.

    Returns:
        sin(angle) rounded to `rounded_values_count` decimal places.

    NOTE(review): the previous signature repeated the parameter name
    `lowerCAmelCase` three times (a SyntaxError) and `result` was read before
    any binding under that name existed; the bindings implied by the body's
    references are restored here.
    """
    # Reduce the angle so the series converges quickly, then work in radians.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians  # leading Maclaurin term: x
    a = 3  # exponent / factorial index of the next term
    b = -1  # alternating sign of the next term
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("""doctest""").testmod()
| 166 | 1 |
class TrieNode:
    """
    A node of a prefix tree (trie) over strings of characters.

    NOTE(review): the previous text declared `insert_many`, `insert`, `find`
    and `delete` all under the single mangled name `_UpperCamelCase` (so they
    shadowed each other) and lost the attribute/local bindings the bodies read
    (`curr.nodes`, `curr.is_leaf`, `word`, ...). The method names restored here
    are the ones the module's own `test_trie` helper calls.
    """

    def __init__(self) -> None:
        self.nodes = {}  # mapping from char to child TrieNode
        self.is_leaf = False  # True when a stored word ends at this node

    def insert_many(self, words) -> None:
        """Insert every word of `words` into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word, creating child nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff `word` was inserted (prefixes alone do not count)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove `word` from the trie, pruning child nodes that become empty."""

        def _delete(curr, word, index) -> bool:
            # Returns True when `curr` became prunable (no children, not a leaf).
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


# Backward-compatible alias for the previous (mangled) class name.
UpperCamelCase__ = TrieNode
def print_words(node, word: str) -> None:
    """
    Recursively print every word stored under `node`, each prefixed by `word`.

    NOTE(review): the previous signature repeated the parameter name
    `__lowerCamelCase` (a SyntaxError) and the body's recursive call targeted
    `print_words` while the function itself was named `A__`; the name the body
    references is restored.
    """
    if node.is_leaf:
        print(word, end=''' ''')
    for key, value in node.nodes.items():
        # Descend into each child, extending the accumulated prefix.
        print_words(value, word + key)
def test_trie() -> bool:
    """
    Self-test for the trie: insert a word list, then exercise find/delete.

    Returns True when every check passes (checks are plain asserts).

    NOTE(review): the previous text named this function `A__` (colliding with
    three sibling helpers) while the module's reporting helpers call
    `test_trie()`; local bindings `words`/`root` read by the body are restored.
    """
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("""banana""")
    assert not root.find("""bandanas""")
    assert not root.find("""apps""")
    assert root.find("""apple""")
    assert root.find("""all""")
    root.delete("""all""")
    assert not root.find("""all""")
    root.delete("""banana""")
    assert not root.find("""banana""")
    assert root.find("""bananas""")
    return True
def print_results(msg: str, passes: bool) -> None:
    """Print *msg* followed by a pass/fail marker depending on *passes*."""
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    """Entry point for pytest-style runs: assert the trie self-test passes."""
    assert test_trie()
def main() -> None:
    """Run the trie self-test and report the result."""
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 299 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
    """Builds a tiny DebertaConfig plus random inputs and shape-checks each task head.

    Instantiated by DebertaModelTest.setUp (see the `self.model_tester = DebertaModelTester(self)`
    call site later in this file); method names match the calls made there.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels and a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        # Pipeline tests need a larger vocab than the shape tests use.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        # A scalar loss has an empty size.
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Run with progressively fewer optional inputs; only the last output is checked.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Wires DebertaModelTester into the common model/pipeline test mixins."""

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # Feature flags consumed by ModelTesterMixin.
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the public microsoft/deberta-base checkpoint."""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 241 | 0 |
from math import factorial

# Factorial of each decimal digit, keyed by the digit's character ("0".."9").
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of *number*.

    Raises:
        TypeError: if *number* is not an int.
        ValueError: if *number* is negative.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Project Euler 74: count starting numbers below *number_limit* whose
    digit-factorial chain contains exactly *chain_length* non-repeating terms.

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        # Reuse the cached tail length when the walk stopped on a known element.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
# Script entry point: run the module doctests, then print the Euler 74 answer
# with the default parameters (chain length 60, limit 1,000,000).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f'''{solution()}''')
| 354 |
import baseaa
def baseaa_encode(string: str) -> bytes:
    """Encode *string* as UTF-8 and return its baseaa-encoded bytes.

    Fixes the original body, which referenced the undefined name ``string``
    while the parameter had a different name (NameError at call time).
    """
    return baseaa.baaencode(string.encode('utf-8'))
def baseaa_decode(encoded: bytes) -> str:
    """Decode baseaa-encoded *encoded* bytes back to a UTF-8 string."""
    return baseaa.baadecode(encoded).decode('utf-8')
if __name__ == "__main__":
    # Round-trip demo; the original printed undefined names because the
    # assignment targets were lost.
    test = """Hello World!"""
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
| 14 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    """Save/load and consistency tests for CLIPSegProcessor (tokenizer + image processor)."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list containing one random PIL image (HWC, uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
# Hands ordered weakest to strongest; index order is used by
# generate_random_hand to derive the expected Loss/Tie/Win outcome.
SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

# (hand, other, expected compare_with result)
TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

# (hand, expected _is_flush result)
TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

# (hand, expected _is_straight result)
TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

# (hand, expected _is_five_high_straight result, expected _card_values)
TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

# (hand, expected _is_same_kind result)
TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

# (hand, expected _hand_type result)
TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)
def generate_random_hand() -> tuple:
    """Pick two random hands from SORTED_HANDS and return (hand, other, expected).

    SORTED_HANDS is ordered weakest-to-strongest, so the index comparison
    yields the expected Loss/Tie/Win outcome.
    """
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    """Return a generator of *number_of_hands* random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected) -> None:
    """_is_flush matches the precomputed expectation."""
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected) -> None:
    """_is_straight matches the precomputed expectation."""
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values) -> None:
    """A-2-3-4-5 detection and the resulting card-value ordering."""
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected) -> None:
    """_is_same_kind matches the precomputed expectation."""
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected) -> None:
    """_hand_type matches the precomputed rank value."""
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected) -> None:
    """compare_with returns the expected Loss/Tie/Win on curated pairs."""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected) -> None:
    """compare_with agrees with the ordering implied by SORTED_HANDS on random pairs."""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted() -> None:
    """Sorting shuffled PokerHands recovers the canonical weakest-to-strongest order."""
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight() -> None:
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight() -> None:
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project() -> None:
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            # Each line holds two space-separated five-card hands:
            # chars 0-13 are player one, chars 15+ are player two.
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 343 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Chinese tokenization tests for the slow and fast RoFormer tokenizers."""

    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib for RoFormer
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib for RoFormer
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 357 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
# Optional-dependency guards: PIL is only needed for text rendering, torch only
# for patch extraction; importing lazily keeps the module importable without them.
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # torch missing: record that the >=1.11 requirement cannot be satisfied.
    # NOTE(review): obfuscated name — the flag is read as
    # `is_torch_greater_or_equal_than_1_11` below, but is only reached when
    # torch IS available, so the mismatch is currently harmless.
    lowerCamelCase_ : List[str] = False
lowerCamelCase_ : int = logging.get_logger(__name__)
# Hub repo hosting the fallback Arial font used by the text renderer.
lowerCamelCase_ : Optional[int] = """ybelkada/fonts"""
def _check_torch_version() -> None:
    """Raise ImportError unless the installed torch is at least 1.11.

    NOTE(review): the def was obfuscated as ``A__``; the restored name is grounded
    by the ``_check_torch_version()`` call sites later in this file.
    """
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
            """Pix2StructImageProcessor. Please upgrade torch.""" )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height x patch_width) patches from a CHW image tensor.

    Returns a tensor of shape [1, rows, columns, patch_height * patch_width * channels].
    Fixes the obfuscated original, whose three parameters were all named
    ``lowerCamelCase`` (a duplicate-argument SyntaxError) while the body referenced
    the real names; the restored function name is grounded by the call site in
    ``extract_flattened_patches``.
    """
    requires_backends(torch_extract_patches, ["""torch"""])
    _check_torch_version()

    # unfold works on batched input, so add and later drop a batch dimension.
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(
        image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)
    )
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
) -> "Image.Image":
    """Render `text` (wrapped at 80 chars) onto a new PIL image with padding.

    Fixes the obfuscated original, whose parameters were all named ``lowerCamelCase``
    (duplicate-argument SyntaxError); restored name grounded by the ``render_text``
    call in ``render_header``. Default values are taken verbatim from the original
    signature; parameter order follows how the body consumes them.
    """
    requires_backends(render_text, """vision""")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = """\n""".join(lines)

    # Resolve the font: explicit bytes > explicit path > Hub fallback (module
    # constant, obfuscated as `lowerCamelCase_`, holds the "ybelkada/fonts" repo).
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(lowerCamelCase_, """Arial.TTF""")
    font = ImageFont.truetype(font, encoding="""UTF-8""", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("""RGB""", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("""RGB""", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Render `header` text above `image` and return the combined image as a numpy array.

    Both parts are resized to a common width; rendering kwargs are forwarded to
    ``render_text``. Fixes the obfuscated original's duplicated parameter names;
    restored name grounded by the ``render_header`` call in ``preprocess``.
    """
    requires_backends(render_header, """vision""")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    # Scale both parts to the common width, preserving aspect ratio.
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("""RGB""", (new_width, new_height + new_header_height), """white""")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class _UpperCamelCase ( BaseImageProcessor ):
    """Image processor that renders an optional header onto images and converts them
    into flattened, fixed-length patch sequences (Pix2Struct style).

    Fixes from the obfuscated original: the base class ``_A`` was undefined
    (``BaseImageProcessor`` is imported above and is what the error messages and
    API imply); every method had duplicated parameter names (SyntaxError) while
    bodies referenced the real names; ``np.uinta``/``torch.floataa`` restored to
    the real dtypes. Method names are grounded by the in-class call sites
    (``self.extract_flattened_patches``, ``self.normalize``).
    """

    # Name(s) of the tensors this processor produces for the model.
    model_input_names = ["""flattened_patches"""]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ):
        """Store preprocessing defaults; `patch_size` falls back to 16x16."""
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Resize `image` so at most `max_patches` patches fit, extract them, prepend
        1-based (row, col) ids to each patch, and zero-pad to `max_patches` rows."""
        requires_backends(self.extract_flattened_patches, """torch""")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["""height"""], patch_size["""width"""]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. rows * cols of patches stays within max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="""bilinear""",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Standardize `image` by its own mean and (floored) std, mimicking
        tf.image.per_image_standardization."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        # Floor the std so constant images do not divide by ~0.
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Full pipeline: RGB conversion, optional VQA header rendering, per-image
        normalization, patch flattening, and attention-mask construction."""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("""data_format""", None) is not None:
            raise ValueError("""data_format is not an accepted input as the outputs are """)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("""A header text must be provided for VQA models.""")
            font_bytes = kwargs.pop("""font_bytes""", None)
            font_path = kwargs.pop("""font_path""", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy: a patch row is real iff it is not all-zero padding
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"""flattened_patches""": images, """attention_mask""": attention_masks}, tensor_type=return_tensors )
        return encoded_outputs
| 223 | 0 |
from math import ceil
def lowercase( device_map , num_blocks ):
    """Validate that `device_map` assigns blocks 0..num_blocks-1 exactly once.

    `device_map` maps a device id to a list of attention-block indices. Raises
    ValueError for duplicated, missing, or extra block indices; returns None.

    Fixes from the obfuscated original: both parameters shared one name so
    `device_map` was an undefined reference, the three result lists were
    collapsed into a single rebound variable (making the later length checks
    test the wrong object), and the `-> Optional[Any]` annotation referenced a
    name this module never imports.
    """
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in device_map.values() for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks) )
    if len(missing_blocks) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks) )
    if len(extra_blocks) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks) )
def lowercase( n_layers , devices ):
    """Evenly split layer indices 0..n_layers-1 across `devices`.

    Returns a dict mapping each device to a contiguous chunk of layer indices
    (the last chunk may be shorter). Fixes the obfuscated original in which both
    parameters shared one name, leaving `n_layers` undefined inside the body.
    """
    layers = list(range(n_layers))
    # Chunk size: ceil so every layer is assigned even when the split is uneven.
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
| 343 | from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    """Builds small ViTMAE configs and dummy inputs for the TF model tests.

    Fixes from the obfuscated original: the ``__init__`` parameters were all
    named ``lowerCamelCase_`` (duplicate-argument SyntaxError) while the body
    referenced the real names, and locals/attributes were collapsed. The class
    name is grounded by the ``TFViTMAEModelTester(self)`` call in the test class
    below; method names are grounded by their ``self.model_tester.*`` call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.0_2,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ViTMAEConfig from the tester's hyperparameters."""
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward TFViTMAEModel and check the hidden-state shape."""
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        """Forward TFViTMAEForPreTraining (RGB then greyscale) and check logits shapes."""
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common TF model tests for ViTMAE. ViTMAE samples a random mask each forward
    pass, so `noise` is passed explicitly wherever outputs are compared.

    Fixes from the obfuscated original: the base classes were the undefined name
    ``__lowerCAmelCase`` (restored to the mixins imported above), the class
    attributes were all collapsed into one name, and locals were collapsed.
    NOTE(review): test method names were also collapsed; the restored ``test_*``
    names follow the standard common-test layout — confirm against the mixin.
    """

    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1E-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["""noise"""] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("""MainLayer""")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("""MainLayer""")] == model_class.__name__[: -len("""Model""")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, """_keras_serializable""", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"""noise""": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, """keras_model.h5""")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                # NOTE(review): the obfuscated original collapsed this line to
                # `UpperCamelCase = 0`; zeroing NaNs matches the comparison below.
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["""last_hidden_state"""].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["""logits"""].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1E-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            new_model = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
    def test_determinism(self):
        pass

    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""")
        self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO sample image used by the vision integration tests.

    Fixes from the obfuscated original: the def was named ``lowercase`` while the
    integration test below calls ``prepare_img()``, and the ``-> int`` annotation
    was wrong (a PIL image is returned).
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Integration test: run the pretrained TF ViTMAE and pin known logits.

    Fixes from the obfuscated original: collapsed local names, the cached
    property renamed to ``default_image_processor`` (grounded by the
    ``self.default_image_processor`` reference in the test body), and stray
    non-Python residue removed from the final line.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""tf""")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1E-4)
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__snake_case = logging.get_logger(__name__)
def a ( module , tensor_name , device , value=None , fpaa_statistics=None ):
    '''
    Move/replace the parameter or buffer `tensor_name` of `module` onto `device`,
    wrapping it in a bitsandbytes quantized parameter class when the existing
    parameter is already an Int8/4-bit one.

    Fixes from the obfuscated original: all five parameters were named ``__a``
    (a duplicate-argument SyntaxError) while the body referenced the real names,
    and the collapsed local assignments (``splits``, ``old_value``, ``new_value``,
    ``kwargs`` ...) are restored. bitsandbytes attribute names are restored to
    the real API (``Params4bit`` is grounded by the hasattr check below;
    ``Int8Params``/``torch.int8`` per the bitsandbytes integration).

    Args:
        module: root nn.Module; `tensor_name` may be dotted ("layer.0.weight").
        tensor_name: name of the parameter/buffer to set.
        device: target device (or "meta").
        value: optional replacement value (tensor or array-like).
        fpaa_statistics: optional fp16 SCB statistics to attach to the weight.
    '''
    # Walk dotted names down to the owning submodule.
    if "." in tensor_name:
        splits = tensor_name.split('''.''')
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f'''{module} has no attribute {split}.''')
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''')
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device('''meta''') and device not in ["meta", torch.device('''meta''')] and value is None:
        raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''')

    is_abit = False
    is_abit_quantized = False
    if is_buffer or not is_bitsandbytes_available():
        is_abit = False
        is_abit_quantized = False
    else:
        # 4-bit params only exist in newer bitsandbytes releases, hence the hasattr guard.
        is_abit = hasattr(bnb.nn, '''Params4bit''') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_abit_quantized = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_abit or is_abit_quantized:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to('''cpu''')
                if value.dtype == torch.int8:
                    is_abit_serializable = version.parse(importlib.metadata.version('''bitsandbytes''')) > version.parse(
                        '''0.37.2''')
                    if not is_abit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''')
            else:
                new_value = torch.tensor(value, device='''cpu''')

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, ConvaD) and fpaa_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_abit_quantized:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_abit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fpaa_statistics is not None:
                setattr(module.weight, '''SCB''', fpaa_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def a(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False) -> Optional[Any]:
    """Recursively replace eligible linear layers of ``model`` with bitsandbytes quantized layers.

    NOTE(review): this is `transformers`' private ``_replace_with_bnb_linear`` helper;
    the original signature collapsed every parameter to ``__a`` (a SyntaxError) — the
    parameter names are reconstructed from how the body uses them.

    Args:
        model: the (sub)module whose children are inspected in place.
        modules_to_not_convert: module names that must stay in full precision.
        current_key_name: dotted-path accumulator used during recursion.
        quantization_config: object exposing ``quantization_method()`` and the
            llm_int8 / bnb_4bit settings read below.
        has_been_replaced: running flag threaded through the recursion.

    Returns:
        ``(model, has_been_replaced)``.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        # Conv1D stores its weight transposed relative to nn.Linear
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                # Store the module class in case we need to transpose the weight later
                model._modules[name].source_cls = type(module)
                # Force requires grad to False to avoid unexpected errors
                model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            # Recurse into composite children, threading the replacement flag through.
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def a(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None) -> Tuple:
    """Public entry point: swap linear layers for bitsandbytes quantized equivalents.

    NOTE(review): the original signature collapsed all four parameters to ``__a``
    (a SyntaxError); names are reconstructed from the body. ``lm_head`` is kept in
    full precision by default so generation stays numerically stable.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        # Nothing was converted: likely a model without nn.Linear modules.
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def a(*args, **kwargs) -> List[str]:
    """Deprecated alias for ``replace_with_bnb_linear``.

    NOTE(review): the original signature used the same name for ``*args`` and
    ``**kwargs`` (a SyntaxError) and passed a garbled value as the warning
    category; ``FutureWarning`` matches the upstream deprecation convention.
    """
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)
def a(*args, **kwargs) -> List[str]:
    """Deprecated alias for ``set_module_quantized_tensor_to_device``.

    NOTE(review): the original signature used the same name for ``*args`` and
    ``**kwargs`` (a SyntaxError) and passed a garbled value as the warning
    category; ``FutureWarning`` matches the upstream deprecation convention.
    """
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def a(__a) -> Union[str, Any]:
    """Return module names that should be kept in full precision when quantizing.

    This is `transformers`' ``get_keys_to_not_convert``: the output head and any
    weight-tied modules must not be converted. NOTE(review): the original body's
    locals were all collapsed to one name, which made several computations wrong
    (e.g. ``sum(model, [])`` and ``set(x) - set(x)``); the data flow below is
    reconstructed from the surviving comments and the upstream implementation.
    """
    tied_model = deepcopy(__a)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = not hasattr(__a, __a.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(__a.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
'''simple docstring'''
from collections import Counter
from timeit import timeit
def a(input_str: str = "") -> bool:
    """Return True if *input_str* can be rearranged into a palindrome.

    Spaces are removed and case is ignored. A string can be rearranged into a
    palindrome iff at most one character occurs an odd number of times.

    NOTE(review): the original parameter was collapsed to ``__a`` while the body
    still referenced ``input_str`` (a NameError); the name is restored here.
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def a(input_str: str = "") -> bool:
    """Return True if *input_str* can be rearranged into a palindrome (manual count).

    Same contract as the Counter-based variant above, implemented with an
    explicit frequency table so the two can be benchmarked against each other.

    NOTE(review): the original collapsed ``input_str``/``lower_case_input_str``/
    ``character_freq_dict`` into one local (NameErrors); names are restored.
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        # More than one odd-frequency character: a palindrome is impossible.
        if odd_char > 1:
            return False
    return True
def a(__a="") -> None:
    """Print each implementation's answer for *__a* alongside a timeit measurement.

    NOTE(review): the timeit statements look up ``z.check_str`` and the two
    palindrome checkers through ``import __main__ as z``, so this function only
    works when the module is executed as a script with those names defined.
    """
    print("\nFor string = ", __a, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(__a),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(__a),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
    # Read a candidate string, benchmark both implementations, then report the verdict.
    # NOTE(review): the obfuscation collapsed the assignments to `__snake_case`,
    # while the calls below expect `check_str`, `benchmark` and `status` — as
    # written these names are undefined; verify against the original script.
    __snake_case = input(
        '''Enter string to determine if it can be rearranged as a palindrome or not: '''
    ).strip()
    benchmark(check_str)
    __snake_case = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""") | 219 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __A( a ):
    """Agent tool that zero-shot-classifies English text against caller-supplied labels
    using an NLI model (BART-large-MNLI).

    NOTE(review): the obfuscation collapsed every class attribute to `snake_case_`
    (so only the last binding survives) and every local to `__a`; the original
    attributes are `default_checkpoint`, `description`, `name`, `pre_processor_class`,
    `model_class`, `inputs` and `outputs` — verify against the upstream tool.
    """
    snake_case_ = '''facebook/bart-large-mnli'''
    snake_case_ = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    snake_case_ = '''text_classifier'''
    snake_case_ = AutoTokenizer
    snake_case_ = AutoModelForSequenceClassification
    snake_case_ = ['''text''', ['''text''']]
    snake_case_ = ['''text''']
    def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
        '''Run the base setup, then locate the "entailment" label id in the model config.

        NOTE(review): locals are collapsed to `__a`; the intended flow assigns
        `config` and `self.entailment_id`, but as written the names do not line up.
        '''
        super().setup()
        __a = self.model.config
        __a = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith('''entail''' ):
                __a = int(_snake_case )
        if self.entailment_id == -1:
            raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple:
        '''Tokenize the text paired with one NLI hypothesis per candidate label.

        NOTE(review): parameters are collapsed to `_snake_case`; the body expects
        them to be named `text` and `labels` — confirm against the original.
        '''
        __a = labels
        return self.pre_processor(
            [text] * len(_snake_case ) , [F"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str:
        '''Pick the label whose hypothesis scored highest on the entailment logit.

        NOTE(review): locals collapsed; `outputs`/`logits`/`label_id` as written
        are undefined, and the hard-coded column 2 presumably stands for
        `self.entailment_id` — verify.
        '''
        __a = outputs.logits
        __a = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id] | 6 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time pointing users at the new import path.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
) | 6 | 1 |
from math import factorial
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> float:
if successes > trials:
raise ValueError("successes must be lower or equal to trials" )
if trials < 0 or successes < 0:
raise ValueError("the function is defined for non-negative integers" )
if not isinstance(snake_case__, snake_case__ ) or not isinstance(snake_case__, snake_case__ ):
raise ValueError("the function is defined for non-negative integers" )
if not 0 < prob < 1:
raise ValueError("prob has to be in range of 1 - 0" )
__UpperCAmelCase : List[Any] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
__UpperCAmelCase : Dict = float(factorial(snake_case__ ) )
coefficient /= factorial(snake_case__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
    # Run the module doctests, then print a worked example.
    from doctest import testmod
    testmod()
    print('''Probability of 2 successes out of 4 trails''')
    print('''with probability of 0.75 is:''', end=''' ''')
    # NOTE(review): `binomial_distribution` is the function's original name; in
    # this obfuscated file the definition above is called `_UpperCamelCase`, so
    # this call is unresolved as written — verify against the original script.
    print(binomial_distribution(2, 4, 0.7_5))
| 342 | import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level tokenizer resources for facebook/blenderbot-3B.
# NOTE(review): the obfuscation collapsed four distinct constants (the logger,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and the positional-embeddings
# size map) into the single name `_snake_case`, so only the last binding
# survives at runtime — verify against the original module.
_snake_case = logging.get_logger(__name__)
_snake_case = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}
_snake_case = {
    '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
    '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
    '''tokenizer_config_file''': {
        '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
    },
}
_snake_case = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Dict:
__UpperCAmelCase : Tuple = (
list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : str = bs[:]
__UpperCAmelCase : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Optional[Any] = [chr(snake_case__ ) for n in cs]
return dict(zip(snake_case__, snake_case__ ) )
def _UpperCamelCase ( snake_case__ ) -> Any:
__UpperCAmelCase : List[Any] = set()
__UpperCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Union[str, Any] = char
return pairs
class _snake_case ( _lowercase ):
    """Byte-level BPE tokenizer for Blenderbot (GPT-2-style BPE with Blenderbot specials).

    NOTE(review): this file's obfuscation collapsed distinct names throughout —
    every local is `__UpperCAmelCase`, every parameter of a signature shares one
    name (making `__init__` a SyntaxError), and the class attributes are all
    `lowerCamelCase__`. The bodies still *read* the original names
    (`self.encoder`, `self.bpe_ranks`, `word`, `pairs`, ...), so the comments
    below describe the apparent intent; verify against the upstream
    BlenderbotTokenizer before relying on any of it.
    """
    lowerCamelCase__: str = VOCAB_FILES_NAMES
    lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase__: Dict = ["input_ids", "attention_mask"]
    # NOTE(review): every parameter below is named `__lowerCamelCase` (SyntaxError);
    # the originals are vocab_file, merges_file, errors and the special tokens.
    def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]:
        # Wrap plain-string special tokens into AddedToken instances.
        __UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
        __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
        __UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
        __UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
        __UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
        __UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        __UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
        super().__init__(
            errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
        # Load vocab.json (token -> id) and build the reverse map.
        with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
            __UpperCAmelCase : List[Any] = json.load(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
        __UpperCAmelCase : Dict = errors # how to handle errors in decoding
        __UpperCAmelCase : Optional[int] = bytes_to_unicode()
        __UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
        # Load merges.txt (one merge rule per line, skipping the version header).
        with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
            __UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
        __UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
        __UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
        __UpperCAmelCase : List[Any] = {}
        __UpperCAmelCase : Tuple = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        __UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def _lowerCamelCase ( self: Dict ) -> Any:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder )
    def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
        """Full vocabulary: base encoder merged with tokens added after init."""
        return dict(self.encoder , **self.added_tokens_encoder )
    def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]:
        """Apply (and memoize) the BPE merge loop to a single pre-token."""
        if token in self.cache:
            return self.cache[token]
        __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
        __UpperCAmelCase : Dict = get_pairs(__lowerCamelCase )
        if not pairs:
            return token
        while True:
            # Pick the lowest-ranked (most frequent) mergeable pair.
            __UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
            __UpperCAmelCase : Optional[int] = []
            __UpperCAmelCase : str = 0
            while i < len(__lowerCamelCase ):
                try:
                    __UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __UpperCAmelCase : Union[str, Any] = j
                if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
            __UpperCAmelCase : str = new_word
            if len(__lowerCamelCase ) == 1:
                break
            else:
                __UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase )
        __UpperCAmelCase : Union[str, Any] = word
        return word
    def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
        """Split text with the pre-tokenization regex, byte-encode, then BPE each piece."""
        __UpperCAmelCase : Any = []
        for token in re.findall(self.pat , __lowerCamelCase ):
            __UpperCAmelCase : int = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
        return bpe_tokens
    def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
        """Token string -> id, falling back to the unknown-token id."""
        return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
    def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
        """Id -> token string via the reverse vocabulary."""
        return self.decoder.get(__lowerCamelCase )
    def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
        """Join tokens and undo the byte-to-unicode encoding back to text."""
        __UpperCAmelCase : Dict = "".join(__lowerCamelCase )
        __UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
        """Write vocab.json and merges.txt into the given directory."""
        if not os.path.isdir(__lowerCamelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __UpperCAmelCase : Any = os.path.join(
            __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        __UpperCAmelCase : Dict = os.path.join(
            __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
        __UpperCAmelCase : Optional[Any] = 0
        with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # Merges must be written in rank order; warn if the ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    __UpperCAmelCase : Optional[Any] = token_index
                writer.write(" ".join(__lowerCamelCase ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
        """Mask marking special-token positions (1) vs sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCamelCase )) + [1]
        return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
    def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
        """Token-type ids: all zeros (Blenderbot does not use segment ids)."""
        __UpperCAmelCase : int = [self.sep_token_id]
        __UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
        """Optionally prepend a space so the first word byte-pair-encodes like mid-sentence words."""
        __UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
            __UpperCAmelCase : Optional[Any] = " " + text
        return (text, kwargs)
    def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
        """Append only EOS; Blenderbot inputs carry no BOS."""
        return token_ids_a + [self.eos_token_id]
    def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
        """Flatten a Conversation into ids, trimming from the left to the model max length."""
        __UpperCAmelCase : Tuple = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(__lowerCamelCase )
        __UpperCAmelCase : Optional[int] = "  ".join(__lowerCamelCase )
        __UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
        if len(__lowerCamelCase ) > self.model_max_length:
            __UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
            logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
| 342 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCAmelCase :
    """Test helper that builds small TF-XGLM configs and dummy inputs.

    NOTE(review): the obfuscation collapsed all class attributes to
    `lowerCAmelCase_` (originals appear to be config_cls, config_updates and
    hidden_act) and every attribute assignment in `__init__` to the single
    local `__lowercase`, so the stored state does not survive as written —
    verify against the upstream TFXGLMModelTester.
    """
    lowerCAmelCase_ = XGLMConfig
    lowerCAmelCase_ = {}
    lowerCAmelCase_ = "gelu"
    def __init__( self : Any , __lowercase : List[str] , __lowercase : int=14 , __lowercase : Optional[int]=7 , __lowercase : List[str]=True , __lowercase : int=True , __lowercase : Optional[int]=True , __lowercase : Optional[int]=99 , __lowercase : List[Any]=32 , __lowercase : str=2 , __lowercase : List[Any]=4 , __lowercase : Dict=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : str=0.1 , __lowercase : str=512 , __lowercase : List[Any]=0.0_2 , ):
        """Record the tester's hyperparameters (batch size, depths, dropout, ...)."""
        __lowercase =parent
        __lowercase =batch_size
        __lowercase =seq_length
        __lowercase =is_training
        __lowercase =use_input_mask
        __lowercase =use_labels
        __lowercase =vocab_size
        __lowercase =d_model
        __lowercase =num_hidden_layers
        __lowercase =num_attention_heads
        __lowercase =ffn_dim
        __lowercase =activation_function
        __lowercase =activation_dropout
        __lowercase =attention_dropout
        __lowercase =max_position_embeddings
        __lowercase =initializer_range
        __lowercase =None
        __lowercase =0
        __lowercase =2
        __lowercase =1
    def snake_case ( self : int ):
        """Fetch the reference config for the pretrained 564M checkpoint."""
        return XGLMConfig.from_pretrained('facebook/xglm-564M' )
    def snake_case ( self : Any ):
        """Build a config plus random input ids, attention mask and head mask."""
        __lowercase =tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        __lowercase =None
        if self.use_input_mask:
            __lowercase =random_attention_mask([self.batch_size, self.seq_length] )
        __lowercase =self.get_config()
        __lowercase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def snake_case ( self : Dict ):
        """Construct a small XGLMConfig from the tester's hyperparameters."""
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__lowercase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__lowercase , )
    def snake_case ( self : Any ):
        """Split prepared config-and-inputs into (config, model-call kwargs)."""
        __lowercase =self.prepare_config_and_inputs()
        (
            (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) ,
        ) =config_and_inputs
        __lowercase ={
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class lowerCAmelCase ( A , A , unittest.TestCase ):
    """Common-test suite wiring TF-XGLM into the shared model/pipeline mixins.

    NOTE(review): all class attributes are collapsed to `lowerCAmelCase_` (the
    originals are all_model_classes, all_generative_model_classes,
    pipeline_model_mapping and three boolean feature flags), and the base
    classes are collapsed to `A` — verify against the upstream test module.
    """
    lowerCAmelCase_ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    lowerCAmelCase_ = (TFXGLMForCausalLM,) if is_tf_available() else ()
    lowerCAmelCase_ = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    def snake_case ( self : int ):
        """Create the model tester and a ConfigTester for common config checks."""
        __lowercase =TFXGLMModelTester(self )
        __lowercase =ConfigTester(self , config_class=__lowercase , n_embd=37 )
    def snake_case ( self : List[Any] ):
        """Run the shared configuration sanity tests."""
        self.config_tester.run_common_tests()
    @slow
    def snake_case ( self : Optional[int] ):
        """Smoke-test loading the first pretrained checkpoint from the archive list."""
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase =TFXGLMModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
    def snake_case ( self : Union[str, Any] ):
        """Skipped pending the embeddings refactor; delegates to the mixin test."""
        super().test_resize_token_embeddings()
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests for TF-XGLM generation against pinned expected outputs.

    NOTE(review): locals are collapsed to `__lowercase` throughout, so the
    intermediate names referenced below (model, tokenizer, output_ids, ...)
    are not actually bound as written — verify against the upstream tests.
    """
    @slow
    def snake_case ( self : str , __lowercase : List[Any]=True ):
        """Greedy LM generation from a fixed prompt must match the pinned token ids."""
        __lowercase =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        __lowercase =tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        __lowercase =[2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        __lowercase =model.generate(__lowercase , do_sample=__lowercase , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , __lowercase )
    @slow
    def snake_case ( self : Union[str, Any] ):
        """Seeded sampling on CPU must reproduce a pinned completion string."""
        __lowercase =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        __lowercase =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tf.random.set_seed(0 )
        __lowercase =tokenizer('Today is a nice day and' , return_tensors='tf' )
        __lowercase =tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0' ):
            __lowercase =model.generate(__lowercase , do_sample=__lowercase , seed=[7, 0] )
        __lowercase =tokenizer.decode(output_ids[0] , skip_special_tokens=__lowercase )
        __lowercase =(
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(__lowercase , __lowercase )
    @slow
    def snake_case ( self : List[Any] ):
        """Batched generation with left padding must match the unpadded per-sentence outputs."""
        __lowercase =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        __lowercase =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        __lowercase ='left'
        # use different length sentences to test batching
        __lowercase =[
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        __lowercase =tokenizer(__lowercase , return_tensors='tf' , padding=__lowercase )
        __lowercase =inputs['input_ids']
        __lowercase =model.generate(input_ids=__lowercase , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
        __lowercase =tokenizer(sentences[0] , return_tensors='tf' ).input_ids
        __lowercase =model.generate(input_ids=__lowercase , max_new_tokens=12 )
        __lowercase =tokenizer(sentences[1] , return_tensors='tf' ).input_ids
        __lowercase =model.generate(input_ids=__lowercase , max_new_tokens=12 )
        __lowercase =tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase )
        __lowercase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowercase )
        __lowercase =tokenizer.decode(output_padded[0] , skip_special_tokens=__lowercase )
        __lowercase =[
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(__lowercase , __lowercase )
        self.assertListEqual(__lowercase , [non_padded_sentence, padded_sentence] )
| 141 |
'''simple docstring'''
import heapq
def __UpperCamelCase ( lowercase__ : dict ):
'''simple docstring'''
__lowercase =[]
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowercase__, [-1 * len(lowercase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
__lowercase =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__lowercase =heapq.heappop(lowercase__ )[1][0]
chosen_vertices.add(lowercase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
__lowercase =elem[1][1].index(lowercase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowercase__ )
return chosen_vertices
if __name__ == "__main__":
    # Run doctests, then demo the cover on a small example graph.
    import doctest
    doctest.testmod()
    # NOTE(review): the assignment target was collapsed to `UpperCAmelCase`
    # while the print below references `graph` and `greedy_min_vertex_cover`
    # (the function above is named `__UpperCamelCase`) — as written these names
    # are undefined; verify against the original script.
    UpperCAmelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 141 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    # Unit tests for StableDiffusionPipelineSafe built from tiny dummy sub-models.
    #
    # NOTE(review): identifiers in this block appear machine-mangled: every
    # method is named `UpperCAmelCase` (later defs shadow earlier ones, so the
    # `self.dummy_*` properties referenced below do not exist under those
    # names), every local is `lowercase__` (so later reads of `sd_pipe`,
    # `prompt`, `image`, ... are unbound), and `_lowercase` is passed as an
    # argument where it is never defined. `def extract(*_lowercase,
    # **_lowercase)` even repeats a parameter name, which is a SyntaxError.
    # Restore from the upstream diffusers test module before relying on it.

    def UpperCAmelCase ( self :Any ):
        '''Release Python and CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def UpperCAmelCase ( self :Any ):
        '''A single random 3x32x32 image tensor with a fixed RNG seed.'''
        lowercase__ = 1
        lowercase__ = 3
        lowercase__ = (32, 32)
        lowercase__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowercase )
        return image

    @property
    def UpperCAmelCase ( self :Optional[int] ):
        '''Tiny deterministic conditional UNet (seeded).'''
        torch.manual_seed(0 )
        lowercase__ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        return model

    @property
    def UpperCAmelCase ( self :List[Any] ):
        '''Tiny deterministic VAE (seeded).'''
        torch.manual_seed(0 )
        lowercase__ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    @property
    def UpperCAmelCase ( self :Optional[Any] ):
        '''Tiny deterministic CLIP text encoder (seeded).'''
        torch.manual_seed(0 )
        lowercase__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(_lowercase )

    @property
    def UpperCAmelCase ( self :Dict ):
        '''Stand-in feature extractor: returns an object with empty pixel_values.'''
        def extract(*_lowercase :Optional[Any] , **_lowercase :Tuple ):
            # NOTE(review): `return Out()` below does not match this mangled
            # class name either — see the block-level note.
            class lowerCAmelCase :
                def __init__( self :Union[str, Any] ):
                    '''Holds an empty pixel_values tensor.'''
                    lowercase__ = torch.ones([0] )
                def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
                    '''Move pixel_values to the given device; returns self.'''
                    self.pixel_values.to(_lowercase )
                    return self
            return Out()
        return extract

    def UpperCAmelCase ( self :Tuple ):
        '''DDIM scheduler: output matches a pinned slice and the tuple return
        path agrees with the dict return path.'''
        lowercase__ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        lowercase__ = self.dummy_cond_unet
        lowercase__ = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
        lowercase__ = self.dummy_vae
        lowercase__ = self.dummy_text_encoder
        lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        # make sure here that pndm scheduler skips prk
        lowercase__ = StableDiffusionPipeline(
            unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
        lowercase__ = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        lowercase__ = "A painting of a squirrel eating a burger"
        lowercase__ = torch.Generator(device=_lowercase ).manual_seed(0 )
        lowercase__ = sd_pipe([prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
        lowercase__ = output.images
        lowercase__ = torch.Generator(device=_lowercase ).manual_seed(0 )
        lowercase__ = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_lowercase , )[0]
        lowercase__ = image[0, -3:, -3:, -1]
        lowercase__ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowercase__ = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def UpperCAmelCase ( self :List[str] ):
        '''PNDM scheduler variant of the determinism test above.'''
        lowercase__ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        lowercase__ = self.dummy_cond_unet
        lowercase__ = PNDMScheduler(skip_prk_steps=_lowercase )
        lowercase__ = self.dummy_vae
        lowercase__ = self.dummy_text_encoder
        lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        # make sure here that pndm scheduler skips prk
        lowercase__ = StableDiffusionPipeline(
            unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
        lowercase__ = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        lowercase__ = "A painting of a squirrel eating a burger"
        lowercase__ = torch.Generator(device=_lowercase ).manual_seed(0 )
        lowercase__ = sd_pipe([prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
        lowercase__ = output.images
        lowercase__ = torch.Generator(device=_lowercase ).manual_seed(0 )
        lowercase__ = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=_lowercase , )[0]
        lowercase__ = image[0, -3:, -3:, -1]
        lowercase__ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowercase__ = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def UpperCAmelCase ( self :Optional[int] ):
        '''Pipeline loads, runs and round-trips save/load with safety_checker=None.'''
        lowercase__ = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=_lowercase )
        assert isinstance(_lowercase , _lowercase )
        assert isinstance(pipe.scheduler , _lowercase )
        assert pipe.safety_checker is None
        lowercase__ = pipe("example prompt" , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(_lowercase )
            lowercase__ = StableDiffusionPipeline.from_pretrained(_lowercase )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        lowercase__ = pipe("example prompt" , num_inference_steps=2 ).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def UpperCAmelCase ( self :Dict ):
        '''fp16 smoke test on GPU: pipeline produces an image of the right shape.'''
        lowercase__ = self.dummy_cond_unet
        lowercase__ = PNDMScheduler(skip_prk_steps=_lowercase )
        lowercase__ = self.dummy_vae
        lowercase__ = self.dummy_text_encoder
        lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        # put models in fp16
        lowercase__ = unet.half()
        lowercase__ = vae.half()
        lowercase__ = bert.half()
        # make sure here that pndm scheduler skips prk
        lowercase__ = StableDiffusionPipeline(
            unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
        lowercase__ = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        lowercase__ = "A painting of a squirrel eating a burger"
        lowercase__ = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    # Nightly GPU integration tests for safe stable diffusion (SLD guidance).
    #
    # NOTE(review): same machine-mangled identifiers as the unit-test class
    # above — locals are all `lowercase__` and `_lowercase` is referenced
    # unbound — so this block needs to be restored from upstream before it
    # can actually run.

    def UpperCAmelCase ( self :Optional[int] ):
        '''Release Python and CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase ( self :int ):
        '''Harmful prompt: compare output slices with SLD off vs. strong SLD.'''
        lowercase__ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_lowercase )
        lowercase__ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        lowercase__ = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        lowercase__ = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        lowercase__ = 40_03_66_03_46
        lowercase__ = 7
        # without safety guidance (sld_guidance_scale = 0)
        lowercase__ = torch.manual_seed(_lowercase )
        lowercase__ = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        lowercase__ = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
        lowercase__ = torch.manual_seed(_lowercase )
        lowercase__ = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        lowercase__ = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def UpperCAmelCase ( self :Union[str, Any] ):
        '''Borderline prompt: slices with SLD off vs. strong SLD settings.'''
        lowercase__ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=_lowercase )
        lowercase__ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        lowercase__ = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        lowercase__ = "padme amidala taking a bath artwork, safe for work, no nudity"
        lowercase__ = 27_34_97_17_55
        lowercase__ = 7
        lowercase__ = torch.manual_seed(_lowercase )
        lowercase__ = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        lowercase__ = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        lowercase__ = torch.manual_seed(_lowercase )
        lowercase__ = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        lowercase__ = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def UpperCAmelCase ( self :str ):
        '''With the default safety checker the unsafe image is blacked out
        (all-zero slice); strong SLD instead yields the pinned non-zero slice.'''
        lowercase__ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
        lowercase__ = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        lowercase__ = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        lowercase__ = 10_44_35_52_34
        lowercase__ = 12
        lowercase__ = torch.manual_seed(_lowercase )
        lowercase__ = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        lowercase__ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
        lowercase__ = torch.manual_seed(_lowercase )
        lowercase__ = sd_pipe(
            [prompt] , generator=_lowercase , guidance_scale=_lowercase , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        lowercase__ = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 201 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCAmelCase ( SchedulerMixin , ConfigMixin ):
    """
    Fourth-order Improved Pseudo Numerical Methods (IPNDM) scheduler.

    Review reconstruction: the checked-in block was machine-mangled — the base
    classes were the undefined name `lowercase_`, every method was named
    `UpperCAmelCase` (so later defs shadowed earlier ones), and two method
    signatures repeated a parameter name, which is a SyntaxError. Method names
    are restored from the calls the bodies themselves make
    (`self.set_timesteps`, `self._get_prev_sample`); the bases are restored
    from this module's imports. `torch.atana` (not a torch API) is replaced by
    `torch.atan2`, matching the upstream IPNDM scheduler.
    """

    # Solver order exposed by the scheduler (kept under the original mangled name).
    __lowerCamelCase = 1

    @register_to_config
    def __init__( self , num_train_timesteps : int = 10_00 , trained_betas : Optional[Union[np.ndarray, List[float]]] = None ):
        """Create the scheduler; precomputes a full-length schedule."""
        # `set_timesteps` fills betas/alphas/timesteps and resets `ets`.
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values (history of epsilon estimates used by the multistep update)
        self.ets = []

    def set_timesteps( self , num_inference_steps : int , device : Union[str, torch.device] = None ):
        """Precompute the beta/alpha tables and the timestep schedule."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        # Fix: was `torch.atana`, which does not exist; upstream uses atan2(beta, alpha).
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []

    def step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , return_dict : bool = True , ):
        """
        One multistep update from `sample` at `timestep` using `model_output`.

        Returns the previous sample wrapped in a `SchedulerOutput`, or as a
        1-tuple when `return_dict` is False.

        Raises:
            ValueError: if `set_timesteps` has not been called.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        # Linear multistep coefficients of increasing order (warm-up, then 4th order).
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )

    def scale_model_input( self , sample : torch.FloatTensor , *args , **kwargs ):
        """IPNDM requires no input scaling; return `sample` unchanged."""
        return sample

    def _get_prev_sample( self , sample : torch.FloatTensor , timestep_index : int , prev_timestep_index : int , ets : torch.FloatTensor ):
        """Invert the noising at `timestep_index`, then re-noise at the next index."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        # Guard against division by an (almost) zero alpha at the end of the schedule.
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__( self ):
        """Number of training timesteps from the registered config."""
        return self.config.num_train_timesteps
| 201 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
# NOTE(review): three distinct module globals (the logger, the fairseq->HF
# name mapping dict, and the top-level key list) were all mangled to the same
# name `lowercase_`, so each assignment shadows the previous one and the
# `logger` / `MAPPING` references in the functions below are unbound as
# written. Restore distinct names before running this script.
lowercase_ = logging.get_logger(__name__)
# fairseq parameter-name fragment -> HF wav2vec2 attribute path; a "*" is
# later replaced with the encoder layer index by the loading code.
lowercase_ = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Parameters that live on the top-level model rather than inside the encoder.
lowercase_ = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """
    Copy `value` into the sub-module of `hf_pointer` addressed by the dotted
    path `key`, on the attribute selected by `weight_type`.

    Review fix: the checked-in signature repeated one mangled parameter name
    five times (a SyntaxError) while the body already referred to the real
    names, which is also how the call site spells this function. The names
    below are restored from the body's own references.

    Args:
        hf_pointer: root HF module to write into.
        key: dotted attribute path, e.g. "encoder.layers.0.attention.k_proj".
        value: tensor to copy in.
        full_name: original fairseq parameter name (used for logging/asserts).
        weight_type: one of "weight", "weight_g", "weight_v", "bias" or None.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    # Shape-check the destination before assigning.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """
    Port every tensor in `fairseq_model`'s state dict into `hf_model`.

    Conv feature-extractor tensors go through `load_conv_layer`, the
    encoder->decoder projection is captured separately, and everything else is
    routed through the MAPPING table into `set_recursively`. Unmatched tensors
    are collected and logged as unused.

    Review fix: the checked-in signature used one mangled name for both
    parameters (a SyntaxError); names are restored from body usage (the
    `.state_dict()` provider vs. the `.feature_extractor` owner).

    Returns:
        The fairseq `proj` module if present, else None.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Layer index sits just before the matched key fragment.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """
    Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    `full_name` looks like "...conv_layers.<layer_id>.<type_id>...": type 0 is
    the conv itself, type 2 a (group/layer) norm. Anything unhandled is
    recorded in `unused_weights`.

    Review fix: the checked-in signature repeated one mangled parameter name
    five times (a SyntaxError); names are restored from the body's own
    references.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """
    Build a bias-free ``nn.Linear`` whose weight is tied to an embedding.

    Review fix: the checked-in body referenced the undefined names `emb` and
    `lin_layer` (the parameter and local had been mangled away), which was a
    guaranteed NameError. The parameter name is restored from the body's own
    references; the function name matches the conventional upstream helper.

    Args:
        emb: an ``nn.Embedding`` (or any module with a 2-D ``weight``).

    Returns:
        ``nn.Linear(vocab_size, emb_size, bias=False)`` sharing `emb`'s weights.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """
    Build a token->id vocabulary from a fairseq dict file.

    Each line of the file is "<token> <count>"; the first whitespace-separated
    field is the token. Ids 0-3 are reserved for the special tokens, real
    tokens start at 4 in file order.

    Review fix: the original computed ``len()`` of the *path argument* instead
    of the parsed word list, so the id range never matched the vocabulary;
    the function name is restored to match its call site (`create_vocab_dict`).

    Args:
        dict_path: path to the fairseq dictionary file.

    Returns:
        dict mapping token -> integer id, including the four special tokens.
    """
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Convert a fairseq wav2vec2 seq2seq checkpoint into an HF
    SpeechEncoderDecoderModel (wav2vec2 encoder + speech_to_text_2 decoder)
    and save model, tokenizer and feature extractor to
    `pytorch_dump_folder_path`.

    Review fix: the checked-in signature repeated one mangled parameter name
    seven times (a SyntaxError); names are restored from the keyword arguments
    used at the call site below. Destination attribute names (`lm_head`,
    `enc_to_dec_proj`, the config fields) had also been mangled to bare
    locals and are reconstructed from the upstream conversion script —
    confirm against upstream before relying on this.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse paths/config options and run the conversion.
    # NOTE(review): the parser/args locals were mangled to `lowercase_`, so
    # the `parser.*` / `args.*` references below are unbound as written —
    # restore the original local names before running this script.
    lowercase_ = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    lowercase_ = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 45 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __lowerCAmelCase :
    '''Helper that builds tiny CTRL configs/inputs and checks model outputs.'''
    # NOTE(review): identifiers here look machine-mangled — `__init__` repeats
    # the parameter name `_a` many times (duplicate argument names are a
    # SyntaxError) and every local is `__a`, so later reads such as `parent`,
    # `batch_size`, `model`, `result`, `config_and_inputs` are unbound.
    # Restore from the upstream transformers `CTRLModelTester` before relying
    # on this block.

    def __init__( self , _a , _a=14 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
        __a = parent
        __a = batch_size
        __a = seq_length
        __a = is_training
        __a = use_token_type_ids
        __a = use_input_mask
        __a = use_labels
        __a = use_mc_token_ids
        __a = vocab_size
        __a = hidden_size
        __a = num_hidden_layers
        __a = num_attention_heads
        __a = intermediate_size
        __a = hidden_act
        __a = hidden_dropout_prob
        __a = attention_probs_dropout_prob
        __a = max_position_embeddings
        __a = type_vocab_size
        __a = type_sequence_label_size
        __a = initializer_range
        __a = num_labels
        __a = num_choices
        __a = scope
        __a = self.vocab_size - 1

    def __UpperCAmelCase ( self ):
        # Build a full set of random inputs plus optional masks/labels and a head mask.
        __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __a = None
        if self.use_input_mask:
            __a = random_attention_mask([self.batch_size, self.seq_length] )
        __a = None
        if self.use_token_type_ids:
            __a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __a = None
        if self.use_mc_token_ids:
            __a = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        __a = None
        __a = None
        __a = None
        if self.use_labels:
            __a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __a = ids_tensor([self.batch_size] , self.num_choices )
        __a = self.get_config()
        __a = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def __UpperCAmelCase ( self ):
        # Tiny CTRL config derived from the tester's dimensions.
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )

    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
        # Forward pass through the bare CTRLModel; checks the hidden-state
        # shape and that one past_key_value is returned per layer.
        __a = CTRLModel(config=_a )
        model.to(_a )
        model.eval()
        model(_a , token_type_ids=_a , head_mask=_a )
        model(_a , token_type_ids=_a )
        __a = model(_a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )

    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
        # LM head: loss must be a scalar, logits cover the full vocabulary.
        __a = CTRLLMHeadModel(_a )
        model.to(_a )
        model.eval()
        __a = model(_a , token_type_ids=_a , labels=_a )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __UpperCAmelCase ( self ):
        # Repackage prepare_config_and_inputs() into the (config, inputs_dict)
        # pair expected by the common test mixins.
        __a = self.prepare_config_and_inputs()
        (
            (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) ,
        ) = config_and_inputs
        __a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict

    def __UpperCAmelCase ( self , _a , _a , _a , _a , *_a ):
        # Sequence classification: logits shape is (batch, num_labels).
        __a = self.num_labels
        __a = CTRLForSequenceClassification(_a )
        model.to(_a )
        model.eval()
        __a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __a = model(_a , token_type_ids=_a , labels=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Common model/generation/pipeline test suite for the CTRL family.'''
    # NOTE(review): the three mixin base classes were mangled to the undefined
    # name `__SCREAMING_SNAKE_CASE` (the imports above suggest
    # ModelTesterMixin, GenerationTesterMixin and PipelineTesterMixin —
    # confirm against upstream), class attributes all share the name
    # `__UpperCAmelCase`, and method parameters repeat `_a` (a SyntaxError).
    # As written this class cannot be defined or run.

    __UpperCAmelCase : str = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    __UpperCAmelCase : Union[str, Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
    __UpperCAmelCase : Union[str, Any] = (
        {
            'feature-extraction': CTRLModel,
            'text-classification': CTRLForSequenceClassification,
            'text-generation': CTRLLMHeadModel,
            'zero-shot': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase : Optional[Any] = True
    __UpperCAmelCase : List[Any] = False
    __UpperCAmelCase : str = False

    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a ):
        # Skip the zero-shot pipeline test: the CTRL tokenizer has no padding token.
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False

    def __UpperCAmelCase ( self ):
        # Instantiate the model tester and the shared config tester.
        __a = CTRLModelTester(self )
        __a = ConfigTester(self , config_class=_a , n_embd=37 )

    def __UpperCAmelCase ( self ):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self ):
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    def __UpperCAmelCase ( self ):
        # Exercise the bare CTRLModel path of the tester.
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*_a )

    def __UpperCAmelCase ( self ):
        # Exercise the LM-head path of the tester.
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*_a )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def __UpperCAmelCase ( self ):
        pass

    @slow
    def __UpperCAmelCase ( self ):
        # Smoke-test loading the first pretrained CTRL checkpoint.
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a = CTRLModel.from_pretrained(_a )
            self.assertIsNotNone(_a )

    @unittest.skip('''The model doesn\'t support left padding''' )  # and it's not used enough to be worth fixing :)
    def __UpperCAmelCase ( self ):
        pass
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    '''Slow integration test: greedy generation from the pretrained `ctrl` checkpoint.'''
    # NOTE(review): locals were mangled to `__a`, so the `model` / `input_ids`
    # reads and the `_a` device/argument references below are unbound as
    # written — restore from the upstream transformers test before running.

    def __UpperCAmelCase ( self ):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def __UpperCAmelCase ( self ):
        # Greedy generation must reproduce the pinned 20-token continuation.
        __a = CTRLLMHeadModel.from_pretrained('''ctrl''' )
        model.to(_a )
        __a = torch.tensor(
            [[11_859, 0, 1_611, 8]] , dtype=torch.long , device=_a )  # Legal the president is
        __a = [
            11_859,
            0,
            1_611,
            8,
            5,
            150,
            26_449,
            2,
            19,
            348,
            469,
            3,
            2_595,
            48,
            20_740,
            246_533,
            246_533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        __a = model.generate(_a , do_sample=_a )
        self.assertListEqual(output_ids[0].tolist() , _a )
| 45 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Any = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class __lowercase(PretrainedConfig):
    """Configuration for Switch Transformers (sparse Mixture-of-Experts, T5-style) models."""

    # Fix: the three class attributes all shared one garbled name, so only the
    # last survived; restored the standard PretrainedConfig attribute names.
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        # Fix: every parameter was named `A` (a SyntaxError); restored the
        # parameter names implied by the attribute assignments below.
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # `feed_forward_proj` may be "relu" or "gated-relu"-style.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 66 |
class __lowercase :
"""simple docstring"""
def __init__( self ) -> None:
'''simple docstring'''
lowerCamelCase = {} # Mapping from char to TrieNode
lowerCamelCase = False
def __A ( self , A ) -> None:
'''simple docstring'''
for word in words:
self.insert(A )
def __A ( self , A ) -> None:
'''simple docstring'''
lowerCamelCase = self
for char in word:
if char not in curr.nodes:
lowerCamelCase = TrieNode()
lowerCamelCase = curr.nodes[char]
lowerCamelCase = True
def __A ( self , A ) -> bool:
'''simple docstring'''
lowerCamelCase = self
for char in word:
if char not in curr.nodes:
return False
lowerCamelCase = curr.nodes[char]
return curr.is_leaf
def __A ( self , A ) -> None:
'''simple docstring'''
def _delete(A , A , A ) -> bool:
if index == len(A ):
# If word does not exist
if not curr.is_leaf:
return False
lowerCamelCase = False
return len(curr.nodes ) == 0
lowerCamelCase = word[index]
lowerCamelCase = curr.nodes.get(A )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
lowerCamelCase = _delete(A , A , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , A , 0 )
def print_words(node, word: str) -> None:
    """Depth-first print of every word stored under *node*, prefixed by *word*.

    Renamed from a garbled identifier: the body already recursed via
    ``print_words``, which was otherwise undefined.
    """
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    """Exercise insert/find/delete on a small trie; returns True on success.

    Fixes: the word list and the root were both bound to one garbled local
    (losing the words), and the all() check tested an undefined name.
    """
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    """Print *msg* followed by a pass/fail tag.

    Fixes: both parameters shared one garbled name (a SyntaxError), and the
    function name is what main() below actually calls.
    """
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    """Pytest entry point: run the trie self-test via assert."""
    assert test_trie()
def main() -> None:
    """Run the trie self-test and report the result.

    Renamed from a garbled identifier: the ``__main__`` guard below calls
    ``main()``, which was otherwise undefined.
    """
    print_results("Testing trie functionality", test_trie())
# Entry point: run the trie demo when executed as a script.
if __name__ == "__main__":
    main()
| 66 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
snake_case_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
snake_case_ : List[Any] = object()
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Tuple = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(UpperCAmelCase_ ) - len(UpperCAmelCase_ ) + 1 ):
_UpperCamelCase : Dict = [x.match(UpperCAmelCase_ ) for x, y in zip(UpperCAmelCase_ , ks[i:] )]
if matches and all(UpperCAmelCase_ ):
return True
return False
def _replacement_rules(rules):
    """Build a ``replace(key, val)`` closure over (rule, replacement) pairs.

    Returns *replacement* for the first rule whose regexes match *key*,
    otherwise the unchanged *val*. Renamed from a garbled identifier that
    collided with the other module functions.
    """

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    """Regex-path → PartitionSpec rules mapping GPT-style parameters to 'mp' sharding.

    Fixes: the function takes no arguments, yet the garbled body referenced a
    parameter-style name where the original had literal ``None`` axes.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Return a frozen pytree of PartitionSpecs for every leaf of *in_dict*.

    Fixes: the intermediate dict was bound to a garbled local while the
    comprehension below iterated the (undefined) name ``initd``, and the
    replace call passed garbled names instead of the loop's key/value.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Start every flattened parameter path as "unmatched", then apply rules.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 83 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    """Output shape selector for the text-generation pipeline's postprocess step.

    Restored member names: the original assigned all three values to one
    duplicated name (Enum raises TypeError on reused keys), while later code
    references ``ReturnType.TENSORS`` / ``NEW_TEXT`` / ``FULL_TEXT``.
    """

    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language-generation pipeline: continues a text prompt with a causal LM.

    Restored method names (``_sanitize_parameters``, ``preprocess``,
    ``_forward``, ``postprocess``): the originals all collided under one
    duplicated name, which silently broke the Pipeline protocol.
    """

    # Article-style prefix that gives XLNet / Transfo-XL models some state to condition on.
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Split user kwargs into preprocess / forward / postprocess parameter dicts."""
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            # Tokenize the prefix once so generation length can account for it.
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """Generate text continuing each prompt in *text_inputs*."""
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        """Tokenize the (optionally prefixed) prompt, trimming for `hole` handling."""
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )
                # Keep only the rightmost tokens that still leave room to generate.
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        """Run `model.generate` and reshape the output to (batch, num_return, seq)."""
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        """Decode generated sequences into records per the requested ReturnType."""
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
| 304 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : Union[str, Any] = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _UpperCamelCase(PretrainedConfig):
    """Configuration for REALM models (embedder / encoder / scorer / reader / open-QA)."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13_353_718,
        searcher_beam_size=5_000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Fix: every parameter was named `__a` (a SyntaxError); restored the
        # parameter names implied by the attribute assignments below.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 351 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch *function(*args)* from a notebook on TPU, multi-GPU, one GPU/MPS, or CPU.

    Fixes: all five parameters shared one garbled name (a SyntaxError), and
    the mixed-precision error message read ``args.mixed_precision`` — ``args``
    is the argument tuple, so that line itself raised AttributeError.
    """
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                # presumably enables the MPS fallback env var as in upstream accelerate -- TODO confirm key
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """Launch *function(*args)* with `num_processes` CPU processes for debugging.

    Fixes: all three parameters shared one garbled name (a SyntaxError); this
    definition also previously collided with notebook_launcher's garbled name.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 259 | 0 |
'''simple docstring'''
class lowercase:
    """Prefix-sum index over a numeric list for O(1) range sums.

    Restored method names ``get_sum`` / ``contains_sum``: the originals
    collided under one duplicated garbled name.
    """

    def __init__(self, array):
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        """Sum of array[start..end], both endpoints inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        """True iff some contiguous subarray sums exactly to *target_sum*."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
# Run the module's doctests when executed directly.
# Fix: removed concatenation residue ("| 97 |") fused onto the last line,
# which made it a syntax error.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
def odd_even_transposition(arr: list) -> list:
    """Sort *arr* in place with odd-even transposition (brick) sort; return it.

    Renamed from a garbled identifier: the demo below calls
    ``odd_even_transposition``, which was otherwise undefined.
    """
    arr_size = len(arr)
    for _ in range(arr_size):
        # Alternate between even- and odd-indexed adjacent pairs each pass.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
# Demo: sort a descending list and show it before/after.
# Fix: the list was bound to a garbled name while the f-string read `arr`.
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 12 | 0 |
"""simple docstring"""
# Conversion factors into km/h, and from km/h into each unit.
# Fix: both tables were bound to one duplicated garbled name while the
# converter below read `speed_chart` / `speed_chart_inverse`.
speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.6_0_9_3_4_4,
    "knot": 1.8_5_2,
}
speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.2_7_7_7_7_7_7_7_8,
    "mph": 0.6_2_1_3_7_1_1_9_2,
    "knot": 0.5_3_9_9_5_6_8_0_3,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert *speed* from *unit_from* to *unit_to*, rounded to 3 decimals.

    Raises ValueError for unsupported units. Fix: both unit parameters shared
    one garbled name (a SyntaxError), and the message joined an undefined name.
    """
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    # Convert to km/h first, then into the target unit.
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
# Run the module's doctests when executed directly.
# Fix: removed concatenation residue ("| 358 |") fused onto the last line,
# which made it a syntax error.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
class A__:
    """Fenwick (binary indexed) tree specialised for range-maximum queries.

    ``arr`` holds raw values; ``tree`` holds per-node running maxima.
    Restored method names ``get_next`` / ``get_prev`` / ``update`` / ``query``:
    all four collided under one duplicated garbled name, while the bodies
    already called ``self.get_prev`` / ``self.get_next``.
    """

    def __init__(self, size):
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index):
        # Next tree node whose range also covers `index`.
        return index | (index + 1)

    @staticmethod
    def get_prev(index):
        # One before the left border of the node ending at `index`.
        return (index & (index + 1)) - 1

    def update(self, index, value):
        """Set arr[index] = value and refresh maxima along the update path."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # NOTE(review): mixing a value with index bounds in max() looks
                # suspect (should presumably re-aggregate the covered range);
                # preserved verbatim from the original three-argument call.
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)

    def query(self, left, right):
        """Maximum of arr[left:right] (right exclusive)."""
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
# Run the module's doctests when executed directly.
# Fix: removed concatenation residue ("| 182 | 0 |") fused onto the last
# line, which made it a syntax error.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build (old_key, new_key) pairs mapping original ViLT weights to HF names.

    Renamed from a garbled identifier shared by several functions; the caller
    below invokes ``create_rename_keys``.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys
def read_in_q_k_v(state_dict, config):
    """Split each fused timm qkv projection into separate HF q/k/v entries.

    Fixes: the popped weights and every target entry were bound to one garbled
    local, so the split tensors were never written back into *state_dict*;
    restored the standard conversion-script key assignments.
    """
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( state_dict ):
    """Drop the timm classification-head weights, which ViLT does not use.

    Mutates *state_dict* in place; absent keys are ignored.

    Fixes vs. original: the loop popped the undefined name ``__lowercase``
    instead of the current key.
    """
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        # default None makes the pop a no-op when the key is missing
        state_dict.pop(k , None )
def _UpperCAmelCase ( dct , old , new ):
    """Move ``dct[old]`` to ``dct[new]`` in place, removing the old key.

    Fixes vs. original: the three parameters shared one name (a SyntaxError)
    and the body popped/assigned undefined names.
    """
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def _UpperCAmelCase ( snake_case , snake_case ):
    """Convert an original ViLT checkpoint (fetched by URL) to HF format and save it.

    NOTE(review): both parameters are named ``snake_case`` — duplicate argument
    names are a SyntaxError. The body reads them as (checkpoint_url,
    pytorch_dump_folder_path); likewise most results below are bound to the
    mangled name ``_lowerCAmelCase`` while later lines read the original
    variable names (``checkpoint_url``, ``config``, ``model`` …) — confirm
    intended names before running.
    """
    # Base ViT-B/32-style config; the task head is configured per checkpoint below.
    _lowerCAmelCase = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=__lowercase )
    # Task flags: vqa / nlvr / irtr / mlm — exactly one is switched on below.
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    if "vqa" in checkpoint_url:
        _lowerCAmelCase = True
        _lowerCAmelCase = 31_29
        _lowerCAmelCase = '''huggingface/label-files'''
        _lowerCAmelCase = '''vqa2-id2label.json'''
        # Fetch the VQAv2 id->label mapping from the hub.
        _lowerCAmelCase = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="""dataset""" ) , """r""" ) )
        _lowerCAmelCase = {int(__lowercase ): v for k, v in idalabel.items()}
        _lowerCAmelCase = idalabel
        _lowerCAmelCase = {v: k for k, v in idalabel.items()}
        _lowerCAmelCase = ViltForQuestionAnswering(__lowercase )
    elif "nlvr" in checkpoint_url:
        _lowerCAmelCase = True
        _lowerCAmelCase = 2
        _lowerCAmelCase = {0: '''False''', 1: '''True'''}
        _lowerCAmelCase = {v: k for k, v in config.idalabel.items()}
        # NLVR2 takes two images per example.
        _lowerCAmelCase = 3
        _lowerCAmelCase = ViltForImagesAndTextClassification(__lowercase )
    elif "irtr" in checkpoint_url:
        _lowerCAmelCase = True
        _lowerCAmelCase = ViltForImageAndTextRetrieval(__lowercase )
    elif "mlm_itm" in checkpoint_url:
        _lowerCAmelCase = True
        _lowerCAmelCase = ViltForMaskedLM(__lowercase )
    else:
        raise ValueError("""Unknown model type""" )
    # load state_dict of original model, remove and rename some keys
    _lowerCAmelCase = torch.hub.load_state_dict_from_url(__lowercase , map_location="""cpu""" )['''state_dict''']
    _lowerCAmelCase = create_rename_keys(__lowercase , __lowercase , __lowercase , __lowercase )
    for src, dest in rename_keys:
        rename_key(__lowercase , __lowercase , __lowercase )
    read_in_q_k_v(__lowercase , __lowercase )
    if mlm_model or irtr_model:
        # These heads are only meaningful for the ITM objective; drop them here.
        _lowerCAmelCase = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(__lowercase , __lowercase )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        # decoder.bias is tied/re-created on load, so it is the only allowed miss.
        _lowerCAmelCase = model.load_state_dict(__lowercase , strict=__lowercase )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(__lowercase )
    # Define processor
    _lowerCAmelCase = ViltImageProcessor(size=3_84 )
    _lowerCAmelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
    _lowerCAmelCase = ViltProcessor(__lowercase , __lowercase )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        _lowerCAmelCase = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=__lowercase ).raw )
        _lowerCAmelCase = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=__lowercase ).raw )
        _lowerCAmelCase = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        _lowerCAmelCase = processor(__lowercase , __lowercase , return_tensors="""pt""" )
        _lowerCAmelCase = processor(__lowercase , __lowercase , return_tensors="""pt""" )
        _lowerCAmelCase = model(
            input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
    else:
        _lowerCAmelCase = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=__lowercase ).raw )
        if mlm_model:
            _lowerCAmelCase = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            _lowerCAmelCase = '''How many cats are there?'''
        _lowerCAmelCase = processor(__lowercase , __lowercase , return_tensors="""pt""" )
        _lowerCAmelCase = model(**__lowercase )
    # Verify outputs against reference values recorded from the original model.
    if mlm_model:
        _lowerCAmelCase = torch.Size([1, 11, 3_05_22] )
        _lowerCAmelCase = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 )
        # verify masked token prediction equals "cats"
        _lowerCAmelCase = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        _lowerCAmelCase = torch.Size([1, 31_29] )
        _lowerCAmelCase = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
        assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 )
        # verify vqa prediction equals "2"
        _lowerCAmelCase = outputs.logits.argmax(-1 ).item()
        assert model.config.idalabel[predicted_idx] == "2"
    elif nlvr_model:
        _lowerCAmelCase = torch.Size([1, 2] )
        _lowerCAmelCase = torch.tensor([-2.8_721, 2.1_291] )
        assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 )
        assert outputs.logits.shape == expected_shape
    Path(__lowercase ).mkdir(exist_ok=__lowercase )
    print(F'Saving model and processor to {pytorch_dump_folder_path}' )
    model.save_pretrained(__lowercase )
    processor.save_pretrained(__lowercase )
if __name__ == "__main__":
    # CLI entry point: fetch a ViLT checkpoint by URL and convert it.
    A__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""",
        type=str,
        help="""URL of the checkpoint you\'d like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    A__ = parser.parse_args()
    # NOTE(review): `parser`, `args` and `convert_vilt_checkpoint` are never bound
    # above — the ArgumentParser / parse_args results were assigned to the mangled
    # name `A__` and the conversion function is named `_UpperCAmelCase`; confirm
    # intended names.
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
# | 82 |  (dataset-table residue between concatenated scripts; commented out so it cannot be parsed as code)
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
# (old prefix, new prefix) pairs applied in order to every checkpoint key.
# NOTE(review): the three module constants below are all bound to the same
# mangled name `UpperCamelCase`; function bodies later refer to them as
# `rename_keys_prefix` / `ACCEPTABLE_CHECKPOINTS` — confirm intended names.
UpperCamelCase = [
    ('''bert.bert''', '''visual_bert'''),
    ('''bert.cls''', '''cls'''),
    ('''bert.classifier''', '''cls'''),
    ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
    ('''position_embeddings_visual''', '''visual_position_embeddings'''),
    ('''projection''', '''visual_projection'''),
]
# File names of the original VisualBERT checkpoints this script accepts.
UpperCamelCase = [
    '''nlvr2_coco_pre_trained.th''',
    '''nlvr2_fine_tuned.th''',
    '''nlvr2_pre_trained.th''',
    '''vcr_coco_pre_train.th''',
    '''vcr_fine_tune.th''',
    '''vcr_pre_train.th''',
    '''vqa_coco_pre_trained.th''',
    '''vqa_fine_tuned.th''',
    '''vqa_pre_trained.th''',
]
def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]:
    """Load a checkpoint file from the local path *__lowercase* onto CPU and return the raw dict.

    Fixes vs. original: the loaded object was bound to a throwaway name while
    ``return sd`` referenced the undefined name ``sd``.
    """
    sd = torch.load(__lowercase , map_location='''cpu''' )
    return sd
# Default (old prefix, new prefix) rename pairs, applied in order to every key.
# Kept local so the function below has a well-defined default even though the
# module-level constant was bound to a mangled name.
_DEFAULT_RENAME_KEYS_PREFIX = [
    ('''bert.bert''', '''visual_bert'''),
    ('''bert.cls''', '''cls'''),
    ('''bert.classifier''', '''cls'''),
    ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
    ('''position_embeddings_visual''', '''visual_position_embeddings'''),
    ('''projection''', '''visual_projection'''),
]


def SCREAMING_SNAKE_CASE( d , config , rename_keys_prefix=None ):
    """Return an OrderedDict with detector keys dropped and remaining keys renamed.

    Also injects ``visual_bert.embeddings.position_ids`` (shape (1, max_position_embeddings))
    and mirrors ``cls.predictions.bias`` into ``cls.predictions.decoder.bias`` when
    the decoder weight is seen (old BERT code had no decoder bias).

    Fixes vs. original: the three parameters shared one name (a SyntaxError),
    the default referenced an undefined module name, and every intermediate was
    assigned to the throwaway name ``A``.
    """
    if rename_keys_prefix is None:
        rename_keys_prefix = _DEFAULT_RENAME_KEYS_PREFIX
    new_d = OrderedDict()
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    for key in d:
        if "detector" in key:
            # detector weights belong to the region-feature extractor, not VisualBERT
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict:
    # Convert an original VisualBERT checkpoint to HF format and save it.
    # NOTE(review): both parameters share the name `__lowercase` (a SyntaxError);
    # the body reads them as (checkpoint_path, pytorch_dump_folder_path), and
    # intermediates below are bound to the throwaway name `A` while later lines
    # read the original names (`model_type`, `config_params`, `model` …) —
    # confirm intended names before running.
    assert (
        checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config: infer task + visual embedding dim from the checkpoint file name.
    if "pre" in checkpoint_path:
        A: Optional[Any] = '''pretraining'''
        if "vcr" in checkpoint_path:
            A: Optional[int] = {'''visual_embedding_dim''': 5_1_2}
        elif "vqa_advanced" in checkpoint_path:
            A: Optional[Any] = {'''visual_embedding_dim''': 2_0_4_8}
        elif "vqa" in checkpoint_path:
            A: Dict = {'''visual_embedding_dim''': 2_0_4_8}
        elif "nlvr" in checkpoint_path:
            A: Tuple = {'''visual_embedding_dim''': 1_0_2_4}
        else:
            raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            A: Dict = {'''visual_embedding_dim''': 5_1_2}
            A: List[str] = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            A: List[str] = {'''visual_embedding_dim''': 2_0_4_8}
            A: Optional[int] = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            A: Dict = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9}
            A: Union[str, Any] = '''vqa'''
        elif "nlvr" in checkpoint_path:
            A: Optional[int] = {
                '''visual_embedding_dim''': 1_0_2_4,
                '''num_labels''': 2,
            }
            A: str = '''nlvr'''
    A: Union[str, Any] = VisualBertConfig(**__lowercase )
    # Load State Dict from disk and remap its keys to HF names.
    A: Union[str, Any] = load_state_dict(__lowercase )
    A: str = get_new_dict(__lowercase , __lowercase )
    # Instantiate the task-specific model class for the detected model_type.
    if model_type == "pretraining":
        A: Optional[Any] = VisualBertForPreTraining(__lowercase )
    elif model_type == "vqa":
        A: Optional[Any] = VisualBertForQuestionAnswering(__lowercase )
    elif model_type == "nlvr":
        A: Union[str, Any] = VisualBertForVisualReasoning(__lowercase )
    elif model_type == "multichoice":
        A: Any = VisualBertForMultipleChoice(__lowercase )
    model.load_state_dict(__lowercase )
    # Save Checkpoints
    Path(__lowercase ).mkdir(exist_ok=__lowercase )
    model.save_pretrained(__lowercase )
if __name__ == "__main__":
    # CLI entry point: convert a local .th VisualBERT checkpoint to HF format.
    UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    UpperCamelCase = parser.parse_args()
    # NOTE(review): `parser`, `args` and `convert_visual_bert_checkpoint` are never
    # bound above (results were assigned to mangled names) — confirm intended names.
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
# | 319 | 0 |  (dataset-table residue between concatenated scripts; commented out so it cannot be parsed as code)
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __snake_case( unittest.TestCase ):
    """Pipeline tests for video classification (decord-backed frame loading)."""

    # Restrict auto-model lookup to video-classification heads.
    UpperCAmelCase : str = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    # NOTE(review): every method below is named `__snake_case`, so later defs
    # shadow earlier ones in the class namespace and none are collected as
    # `test_*` methods — confirm intended test-method names.
    def __snake_case ( self , A_ , A_ , A_ ) -> str:
        # Build a pipeline around the given model/processor and return it with
        # two sample inputs: a local mp4 from the hub and a remote URL.
        lowerCAmelCase = hf_hub_download(
            repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
        lowerCAmelCase = VideoClassificationPipeline(model=A_ , image_processor=A_ , top_k=2 )
        lowerCAmelCase = [
            example_video_filepath,
            """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
        ]
        return video_classifier, examples
    def __snake_case ( self , A_ , A_ ) -> Union[str, Any]:
        # Each prediction must be a list of {score, label} dicts of any value.
        for example in examples:
            lowerCAmelCase = video_classifier(A_ )
            self.assertEqual(
                A_ , [
                    {"""score""": ANY(A_ ), """label""": ANY(A_ )},
                    {"""score""": ANY(A_ ), """label""": ANY(A_ )},
                ] , )
    @require_torch
    def __snake_case ( self ) -> int:
        # Tiny-model smoke test: single video and batched videos, top_k=2,
        # with pinned (seeded) scores from the random-init model.
        lowerCAmelCase = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
        lowerCAmelCase = VideoMAEFeatureExtractor(
            size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
        lowerCAmelCase = pipeline(
            """video-classification""" , model=A_ , feature_extractor=A_ , frame_sampling_rate=4 )
        lowerCAmelCase = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
        lowerCAmelCase = video_classifier(A_ , top_k=2 )
        self.assertEqual(
            nested_simplify(A_ , decimals=4 ) , [{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}] , )
        lowerCAmelCase = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(A_ , decimals=4 ) , [
                [{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}],
                [{"""score""": 0.5_1_9_9, """label""": """LABEL_0"""}, {"""score""": 0.4_8_0_1, """label""": """LABEL_1"""}],
            ] , )
    @require_tf
    def __snake_case ( self ) -> str:
        # TF variant not implemented for this pipeline.
        # NOTE(review): the trailing "| 187 |" on the next line is dataset-table
        # residue fused onto the source line.
        pass | 187 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __snake_case( tf.keras.layers.Layer ):
    """TF adaptive-softmax output layer (Transformer-XL style) with optional
    projections and cluster heads.

    NOTE(review): throughout this class, results are bound to the mangled name
    ``lowerCAmelCase`` while later lines read the original variable names
    (``self.vocab_size``, ``weight``, ``bias``, ``target`` …), and constructor
    parameters are all named ``A_`` — the code cannot run as written; only
    comments/docstrings were added here.
    """
    def __init__( self , A_ , A_ , A_ , A_ , A_=1 , A_=False , **A_ ) -> Optional[int]:
        super().__init__(**A_ )
        # Store sizes and cutoff boundaries; cutoff_ends prepends 0 so that
        # (cutoff_ends[i], cutoff_ends[i+1]) brackets cluster i's vocab slice.
        lowerCAmelCase = vocab_size
        lowerCAmelCase = d_embed
        lowerCAmelCase = d_proj
        lowerCAmelCase = cutoffs + [vocab_size]
        lowerCAmelCase = [0] + self.cutoffs
        lowerCAmelCase = div_val
        lowerCAmelCase = self.cutoffs[0]
        lowerCAmelCase = len(self.cutoffs ) - 1
        lowerCAmelCase = self.shortlist_size + self.n_clusters
        lowerCAmelCase = keep_order
        # Per-cluster projection and output (weight, bias) parameter lists,
        # populated lazily in build().
        lowerCAmelCase = []
        lowerCAmelCase = []
    def __snake_case ( self , A_ ) -> int:
        # build(): create cluster router params plus per-cluster output layers.
        if self.n_clusters > 0:
            lowerCAmelCase = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=A_ , name="""cluster_weight""" )
            lowerCAmelCase = self.add_weight(
                shape=(self.n_clusters,) , initializer="""zeros""" , trainable=A_ , name="""cluster_bias""" )
        if self.div_val == 1:
            # Single shared embedding size: one full-vocab output layer, with an
            # optional projection when d_proj differs from d_embed.
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    lowerCAmelCase = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=A_ , name=f'out_projs_._{i}' , )
                    self.out_projs.append(A_ )
                else:
                    self.out_projs.append(A_ )
                lowerCAmelCase = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._weight' , )
                lowerCAmelCase = self.add_weight(
                    shape=(self.vocab_size,) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._bias' , )
                self.out_layers.append((weight, bias) )
        else:
            # div_val > 1: each further cluster uses a smaller embedding
            # (d_embed / div_val**i) and its own vocab-slice output layer.
            for i in range(len(self.cutoffs ) ):
                lowerCAmelCase, lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                lowerCAmelCase = self.d_embed // (self.div_val**i)
                lowerCAmelCase = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=A_ , name=f'out_projs_._{i}' )
                self.out_projs.append(A_ )
                lowerCAmelCase = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._weight' , )
                lowerCAmelCase = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._bias' , )
                self.out_layers.append((weight, bias) )
        super().build(A_ )
    @staticmethod
    def __snake_case ( A_ , A_ , A_ , A_=None ) -> List[Any]:
        # Compute logits x @ W^T + b, optionally projecting x first.
        lowerCAmelCase = x
        if proj is not None:
            lowerCAmelCase = tf.einsum("""ibd,ed->ibe""" , A_ , A_ )
        return tf.einsum("""ibd,nd->ibn""" , A_ , A_ ) + b
    @staticmethod
    def __snake_case ( A_ , A_ ) -> Dict:
        # Gather per-example log-probs: logprob[r, target[r]] for each row r.
        lowerCAmelCase = shape_list(A_ )
        lowerCAmelCase = tf.range(lp_size[0] , dtype=target.dtype )
        lowerCAmelCase = tf.stack([r, target] , 1 )
        return tf.gather_nd(A_ , A_ )
    def __snake_case ( self , A_ , A_ , A_=True , A_=False ) -> Tuple:
        # call(): return log-probs over the full vocab; if target is given,
        # also accumulate the NLL loss via add_loss/add_metric.
        lowerCAmelCase = 0
        if self.n_clusters == 0:
            # No clusters: plain softmax over the whole vocabulary.
            lowerCAmelCase = self._logit(A_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
            if target is not None:
                lowerCAmelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=A_ , logits=A_ )
            lowerCAmelCase = tf.nn.log_softmax(A_ , axis=-1 )
        else:
            lowerCAmelCase = shape_list(A_ )
            lowerCAmelCase = []
            lowerCAmelCase = tf.zeros(hidden_sizes[:2] )
            for i in range(len(self.cutoffs ) ):
                lowerCAmelCase, lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    # Mask of targets that fall inside this cluster's vocab slice.
                    lowerCAmelCase = (target >= l_idx) & (target < r_idx)
                    lowerCAmelCase = tf.where(A_ )
                    lowerCAmelCase = tf.boolean_mask(A_ , A_ ) - l_idx
                if self.div_val == 1:
                    lowerCAmelCase = self.out_layers[0][0][l_idx:r_idx]
                    lowerCAmelCase = self.out_layers[0][1][l_idx:r_idx]
                else:
                    lowerCAmelCase = self.out_layers[i][0]
                    lowerCAmelCase = self.out_layers[i][1]
                if i == 0:
                    # Head cluster: shortlist tokens + one routing slot per cluster.
                    lowerCAmelCase = tf.concat([cur_W, self.cluster_weight] , 0 )
                    lowerCAmelCase = tf.concat([cur_b, self.cluster_bias] , 0 )
                    lowerCAmelCase = self._logit(A_ , A_ , A_ , self.out_projs[0] )
                    lowerCAmelCase = tf.nn.log_softmax(A_ )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        lowerCAmelCase = tf.boolean_mask(A_ , A_ )
                        lowerCAmelCase = self._gather_logprob(A_ , A_ )
                else:
                    # Tail cluster: p(token) = p(cluster | head) * p(token | cluster).
                    lowerCAmelCase = self._logit(A_ , A_ , A_ , self.out_projs[i] )
                    lowerCAmelCase = tf.nn.log_softmax(A_ )
                    lowerCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
                    lowerCAmelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(A_ )
                    if target is not None:
                        lowerCAmelCase = tf.boolean_mask(A_ , A_ )
                        lowerCAmelCase = tf.boolean_mask(A_ , A_ )
                        lowerCAmelCase = self._gather_logprob(A_ , A_ )
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    # Scatter this cluster's negative log-probs back into the
                    # full (time, batch)-shaped loss tensor.
                    loss += tf.scatter_nd(A_ , -cur_logprob , shape_list(A_ ) )
            lowerCAmelCase = tf.concat(A_ , axis=-1 )
        if target is not None:
            if return_mean:
                lowerCAmelCase = tf.reduce_mean(A_ )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(A_ )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(A_ , name=self.name , aggregation="""mean""" if return_mean else """""" )
        # NOTE(review): the trailing "| 187 | 1 |" below is dataset-table residue
        # fused onto the source line.
        return out | 187 | 1 |
import math
import qiskit
def _UpperCamelCase ( input_a = 1, input_b = 1, carry_in = 1 ) -> Any:
    """Build and simulate a 1-bit quantum full adder on qiskit's Aer simulator.

    Each input may be 0, 1, or 2 (2 = put the qubit in superposition via a
    Hadamard gate). Returns the measurement counts over the (sum, carry-out)
    classical register from 1000 shots.

    Raises TypeError for string inputs and ValueError for negative,
    non-integral, or >2 inputs.

    Fixes vs. original: the three parameters shared one name (a SyntaxError)
    and the type guard had been mangled into ``isinstance(x, x)``, which raised
    for every input.
    """
    if (
        isinstance(input_a, str )
        or isinstance(input_b, str )
        or isinstance(carry_in, str )
    ):
        raise TypeError("inputs must be integers." )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive." )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError("inputs must be exact integers." )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2." )
    # build registers: 4 qubits (a, b, carry-in, ancilla), 2 classical bits
    qr = qiskit.QuantumRegister(4, "qr" )
    cr = qiskit.ClassicalRegister(2, "cr" )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr )
    for i in range(0, 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0, 1 )
    quantum_circuit.ccx(1, 2, 3 )
    quantum_circuit.cx(1, 2 )
    quantum_circuit.cx(0, 1 )
    quantum_circuit.measure([2, 3], cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator" )
    job = qiskit.execute(quantum_circuit, backend, shots=1000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
    # Demo run with all three inputs set to 1.
    # NOTE(review): `quantum_full_adder` is never defined in this file — the adder
    # above is bound to the mangled name `_UpperCamelCase`; confirm intended name.
    print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 157 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class lowerCamelCase__ ( lowerCamelCase_ ):
    """Visual-question-answering pipeline: (image, question) -> ranked answers.

    NOTE(review): the parameter lists below reuse the name SCREAMING_SNAKE_CASE
    for several distinct arguments (a SyntaxError for the `*args, **kwargs` and
    multi-parameter signatures), and results are bound to the mangled name
    `snake_case` while later lines read the original names (`padding`,
    `top_k`, `model_inputs` …) — confirm intended names; only comments and
    docstrings were changed here.
    """
    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
        """Initialize via the base Pipeline and restrict to VQA-capable models."""
        super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
        self.check_model_type(SCREAMING_SNAKE_CASE )
    def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ):
        """Split call kwargs into preprocess (padding/truncation) and postprocess (top_k) params."""
        snake_case , snake_case : Optional[Any] = {}, {}
        if padding is not None:
            snake_case : Optional[Any] = padding
        if truncation is not None:
            snake_case : Union[str, Any] = truncation
        if top_k is not None:
            snake_case : str = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ):
        """Accept either (image, question) or prepacked dict/list input and dispatch to the base pipeline."""
        if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            snake_case : Tuple = {"image": image, "question": question}
        else:
            snake_case : List[str] = image
        snake_case : Optional[int] = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
        return results
    def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ):
        """Tokenize the question and image-process the image into one model-input dict."""
        snake_case : List[Any] = load_image(inputs["image"] )
        snake_case : Tuple = self.tokenizer(
            inputs["question"] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE )
        snake_case : Optional[int] = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework )
        model_inputs.update(SCREAMING_SNAKE_CASE )
        return model_inputs
    def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
        """Run the model forward pass on the prepared inputs."""
        snake_case : Optional[Any] = self.model(**SCREAMING_SNAKE_CASE )
        return model_outputs
    def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ):
        """Convert logits (sigmoid, PyTorch only) into the top-k {score, answer} dicts."""
        if top_k > self.model.config.num_labels:
            snake_case : List[Any] = self.model.config.num_labels
        if self.framework == "pt":
            snake_case : Optional[int] = model_outputs.logits.sigmoid()[0]
            snake_case , snake_case : Any = probs.topk(SCREAMING_SNAKE_CASE )
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        snake_case : Optional[Any] = scores.tolist()
        snake_case : List[Any] = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
| 148 | 0 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
    "compression_format, is_archive" , [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ] , )
def _snake_case ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : str , ) -> Optional[int]:
    # Test: each base extractor recognizes its format and extracts the fixture
    # file so the round-tripped text matches the original.
    # NOTE(review): all parameters share the name `lowerCamelCase__` (SyntaxError)
    # and locals are bound to `lowerCamelCase_` while later lines read the
    # original fixture names — confirm intended names.
    lowerCamelCase_ : Dict ={
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bza_file, BzipaExtractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lza_file, LzaExtractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    lowerCamelCase_ , lowerCamelCase_ : int =input_paths_and_base_extractors[compression_format]
    if input_path is None:
        # Optional backend missing: skip with the reason from the require_* marker.
        lowerCamelCase_ : Optional[Any] =F"""for '{compression_format}' compression_format, """
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(lowerCamelCase__ )
    assert base_extractor.is_extractable(lowerCamelCase__ )
    lowerCamelCase_ : Tuple =tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(lowerCamelCase__ , lowerCamelCase__ )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            lowerCamelCase_ : str =file_path.read_text(encoding="utf-8" )
    else:
        lowerCamelCase_ : str =output_path.read_text(encoding="utf-8" )
    lowerCamelCase_ : Any =text_file.read_text(encoding="utf-8" )
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
    "compression_format, is_archive" , [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ] , )
def _snake_case ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , ) -> Tuple:
    # Test: Extractor.infer_extractor_format detects each format from the file
    # itself, and the generic Extractor.extract round-trips the content.
    # NOTE(review): parameters/locals are mangled as in the test above.
    lowerCamelCase_ : List[Any] ={
        "7z": seven_zip_file,
        "bz2": bza_file,
        "gzip": gz_file,
        "lz4": lza_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    lowerCamelCase_ : Optional[Any] =input_paths[compression_format]
    if input_path is None:
        # Optional backend missing: skip with the reason from the require_* marker.
        lowerCamelCase_ : Union[str, Any] =F"""for '{compression_format}' compression_format, """
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(lowerCamelCase__ )
    lowerCamelCase_ : Optional[int] =Extractor.infer_extractor_format(lowerCamelCase__ )
    assert extractor_format is not None
    lowerCamelCase_ : Optional[int] =tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            lowerCamelCase_ : Tuple =file_path.read_text(encoding="utf-8" )
    else:
        lowerCamelCase_ : Any =output_path.read_text(encoding="utf-8" )
    lowerCamelCase_ : str =text_file.read_text(encoding="utf-8" )
    assert extracted_file_content == expected_file_content
@pytest.fixture
def _snake_case ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict ) -> Dict:
    # Fixture: a tar archive whose member path escapes upward via "..",
    # used to check path-traversal protection.
    # NOTE(review): both parameters share the name `lowerCamelCase__`
    # (SyntaxError); the body reads them as (tmp_path, text_file).
    import tarfile
    lowerCamelCase_ : List[str] =tmp_path / "data_dot_dot"
    directory.mkdir()
    lowerCamelCase_ : Optional[int] =directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(lowerCamelCase__ , "w" ) as f:
        f.add(lowerCamelCase__ , arcname=os.path.join(".." , text_file.name ) )
    return path
@pytest.fixture
def _snake_case ( lowerCamelCase__ : List[Any] ) -> Optional[Any]:
    # Fixture: a tar archive containing a symlink pointing outside the archive,
    # used to check symlink-escape protection.
    # NOTE(review): locals are bound to `lowerCamelCase_` while later lines read
    # the original names (`directory`, `path`) — confirm intended names.
    import tarfile
    lowerCamelCase_ : Union[str, Any] =tmp_path / "data_sym_link"
    directory.mkdir()
    lowerCamelCase_ : Optional[Any] =directory / "tar_file_with_sym_link.tar"
    os.symlink(".." , directory / "subdir" , target_is_directory=lowerCamelCase__ )
    with tarfile.TarFile(lowerCamelCase__ , "w" ) as f:
        f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def _snake_case ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str ) -> List[Any]:
    # Test: extracting an insecure tar logs an ERROR mentioning the offending
    # member instead of writing outside the target directory.
    # NOTE(review): parameters/locals are mangled as in the tests above.
    lowerCamelCase_ : str ={
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    lowerCamelCase_ : List[Any] =insecure_tar_files[insecure_tar_file]
    lowerCamelCase_ : List[Any] =tmp_path / "extracted"
    TarExtractor.extract(lowerCamelCase__ , lowerCamelCase__ )
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def _snake_case ( lowerCamelCase__ : List[Any] ) -> int:
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    # NOTE(review): locals are bound to `lowerCamelCase_` while later lines read
    # the original names (`not_a_zip_file`) — confirm intended names.
    lowerCamelCase_ : int =tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    # A PNG payload that happens to contain a ZIP end-of-central-directory
    # signature, so zipfile.is_zipfile misclassifies it.
    lowerCamelCase_ : Tuple =(
        B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb" ) as f:
        f.write(lowerCamelCase__ )
    assert zipfile.is_zipfile(str(lowerCamelCase__ ) ) # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(lowerCamelCase__ ) # but we're right
| 209 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Map of submodule name -> public symbols it exports, used for lazy importing.
A__ : Tuple = {'tokenization_byt5': ['ByT5Tokenizer']}
# Alias under the conventional name so the _LazyModule call below resolves
# (the original referenced `_import_structure`, which was never defined).
_import_structure = A__

if TYPE_CHECKING:
    # Real import for static type checkers only.
    from .tokenization_byta import ByTaTokenizer
else:
    import sys

    # Replace eager imports with a lazy proxy at runtime.
    A__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 209 | 1 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : Dict =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ):
    """Build a MobileNetV1 config from a model name like ``mobilenet_v1_1.0_224``.

    Parses the depth multiplier and image size from *UpperCamelCase* (the model
    name), attaches the 1001-class ImageNet label maps, and returns the config.
    Raises ValueError for quantized model names.

    Fixes vs. original: the body referenced undefined names (``model_name``,
    ``matches``, ``idalabel``, ``config``), and the id->label comprehension
    called ``int()`` on the model name instead of the label key.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in UpperCamelCase:
        raise ValueError("""Quantized models are not supported.""" )
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , UpperCamelCase )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    # Shift every ImageNet class up by one to make room for "background" at 0.
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def SCREAMING_SNAKE_CASE_ () -> Union[str, Any]:
    """Download and return the standard COCO sanity-check image (two cats).

    Fixes vs. original: ``requests.get`` was called with the undefined name
    ``UpperCamelCase`` for both the URL and ``stream`` instead of the local
    URL string and ``stream=True``.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ) -> Tuple:
    # Convert a TensorFlow MobileNetV1 checkpoint to HF format, sanity-check its
    # logits on a test image, save, and optionally push to the hub.
    # NOTE(review): all four parameters share the name `UpperCamelCase`
    # (a SyntaxError); the body reads them as (model_name, checkpoint_path,
    # pytorch_dump_folder_path, push_to_hub). Intermediates are bound to the
    # mangled name `lowerCamelCase__` while later lines read the original names —
    # confirm intended names before running.
    lowerCamelCase__ : Union[str, Any] = get_mobilenet_va_config(UpperCamelCase )
    # Load 🤗 model
    lowerCamelCase__ : Optional[Any] = MobileNetVaForImageClassification(UpperCamelCase ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(UpperCamelCase , UpperCamelCase , UpperCamelCase )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    lowerCamelCase__ : Optional[Any] = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
    lowerCamelCase__ : Any = image_processor(images=prepare_img() , return_tensors="""pt""" )
    lowerCamelCase__ : str = model(**UpperCamelCase )
    lowerCamelCase__ : Dict = outputs.logits
    assert logits.shape == (1, 1001)
    # Reference logits recorded from the original TF models for two known sizes.
    if model_name == "mobilenet_v1_1.0_224":
        lowerCamelCase__ : Optional[Any] = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        lowerCamelCase__ : Union[str, Any] = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        lowerCamelCase__ : Tuple = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , UpperCamelCase , atol=1E-4 )
    Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(UpperCamelCase )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(UpperCamelCase )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        lowerCamelCase__ : Optional[Any] = """google/""" + model_name
        image_processor.push_to_hub(UpperCamelCase )
        model.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
    # NOTE(review): `parser` and `args` were assigned to a throwaway name in the
    # original, making every subsequent reference a NameError; restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule name -> list of public symbols.
# NOTE(review): restored from an original where both the dict and the model list
# were bound to `_A`, `_import_structure` was undefined, and the TYPE_CHECKING
# imports used mismatched (`Swinva*`) names.
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

# Modeling symbols are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; names mirror the strings above.
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Map of canonical checkpoint names to their hosted config files.
# NOTE(review): renamed from an obfuscated identifier; `ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`
# is the conventional name for this constant — confirm no external caller uses the old one.
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    """
    Configuration class for ALBERT models.

    NOTE(review): the original defined every parameter under the same obfuscated
    name (a SyntaxError) and never stored them on the instance. Names are
    restored from the assignment order; defaults are kept exactly as found.
    The base class is `PretrainedConfig` (imported above and otherwise unused).
    """

    model_type = "albert"  # NOTE(review): restored conventional attribute name — confirm

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    """
    ONNX export configuration for ALBERT.

    NOTE(review): restored from an obfuscated original that assigned the axis
    map to a throwaway local while returning the undefined name `dynamic_axis`,
    and that shadowed the config class above with the same junk class name.
    The property name `inputs` is what `OnnxConfig` expects — confirm.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 107 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)  # referenced by _load_model below

set_seed(770)

# Mapping of nanoGPT-style layer names to their HF Bark counterparts.
# NOTE(review): all constants below were collapsed to one obfuscated name while the
# functions reference `new_layer_name_dict`, `REMOTE_MODEL_PATHS` and `CACHE_DIR`;
# names restored accordingly.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub locations of the original suno/bark checkpoints, keyed by model flavor.
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))  # NOTE(review): unused in the visible chunk
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
# Local cache directory for downloaded checkpoints; honors XDG_CACHE_HOME.
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path for a bark checkpoint of the given flavor.

    NOTE(review): renamed to match the call site in `load_model`; the original
    signature used one obfuscated name for both parameters (a SyntaxError).
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_hub, file_name):
    """Download `file_name` from the given Hub repo into the local CACHE_DIR.

    NOTE(review): renamed to match the call site in `_load_model`; locals restored.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_hub, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load a suno/bark checkpoint into the matching HF Bark sub-model.

    NOTE(review): identifiers restored from an obfuscated original (all locals
    shared one name); verify against the upstream bark conversion script.

    Args:
        ckpt_path: local path of the original checkpoint (downloaded if absent).
        device: torch device for `map_location` and the final model.
        use_small: select the `*_small` checkpoint variants.
        model_type: one of "text", "coarse", "fine".

    Returns:
        The converted, eval-mode HF model placed on `device`.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile wrapper prefix and rename layers.
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    # `.attn.bias` buffers are derived masks and are allowed to differ.
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""")
    if len(missing_keys) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""")

    # strict=False tolerates the ignorable `.attn.bias` buffers checked above.
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss""")
    model.eval()
    model.to(device)
    del checkpoint, state_dict

    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one bark sub-model, verify it against the original, and save it.

    NOTE(review): identifiers restored from an obfuscated original; renamed to
    match the `__main__` call site.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """Assemble the three converted sub-models plus Encodec into one BarkModel and save it.

    NOTE(review): parameter and attribute names reconstructed from an obfuscated
    original — confirm the six-argument order and the BarkModel attribute names
    against the upstream conversion script.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)

    bark = BarkModel(bark_config)

    # NOTE(review): attribute names assumed from the HF BarkModel API — verify.
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    # NOTE(review): `parser`/`args` were bound to a throwaway name in the original.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 107 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example for the image-to-3D pipeline defined below.
# NOTE(review): restored conventional names — both values were bound to `A__`.
EXAMPLE_DOC_STRING = '''
    Examples:
    ```py
    >>> from PIL import Image
    >>> import torch
    >>> from diffusers import DiffusionPipeline
    >>> from diffusers.utils import export_to_gif, load_image

    >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    >>> repo = "openai/shap-e-img2img"
    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
    >>> pipe = pipe.to(device)

    >>> guidance_scale = 3.0
    >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
    >>> image = load_image(image_url).convert("RGB")

    >>> images = pipe(
    ...     image,
    ...     guidance_scale=guidance_scale,
    ...     num_inference_steps=64,
    ...     frame_size=256,
    ... ).images

    >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
    ```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """
    Output class for the Shap-E pipelines.

    NOTE(review): class and field names restored from an obfuscated original —
    the pipeline below constructs `ShapEPipelineOutput(images=...)`, and
    `BaseOutput` is imported above and otherwise unused.

    Attributes:
        images: rendered frames, as PIL images or numpy arrays.
    """

    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for generating latent 3D representations from an input image with Shap-E.

    NOTE(review): reconstructed from an obfuscated original where methods were
    defined under junk names but invoked as `self.prepare_latents`,
    `self._encode_image` and `self._execution_device`, all locals shared one
    name, and a leftover debug `print()` sat in the render loop. Verify against
    the upstream diffusers Shap-E img2img pipeline.
    """

    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw fresh latents (or validate user-supplied ones) and scale by the init sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        return latents * scheduler.init_noise_sigma

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload submodels to CPU, moving each to the GPU only while its forward runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        """Device to run on, accounting for accelerate hooks installed by offloading."""
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        """Encode the conditioning image(s) into CLIP embeddings, duplicated per prompt."""
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the prior diffusion and render the resulting latents into frames.

        Returns a `ShapEPipelineOutput` (or a plain tuple when `return_dict=False`).
        See the module-level example string for usage.
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler)

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(noise_pred, timestep=t, sample=latents).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for latent in latents:
            rendered = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128)
            images.append(rendered)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(frame) for frame in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 70 |
'''simple docstring'''
def selection_sort(collection):
    """Sort a mutable sequence in place with selection sort and return it.

    NOTE(review): renamed from an obfuscated identifier to match the
    `selection_sort(unsorted)` call below; the loop locals (`least`) were all
    bound to one throwaway name, leaving `least` undefined.

    Args:
        collection: mutable sequence of comparable items.

    Returns:
        The same sequence, sorted ascending.
    """
    length = len(collection)
    for i in range(length - 1):
        # Find the index of the smallest remaining element.
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    # NOTE(review): `user_input`/`unsorted` were bound to throwaway names in the original.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
| 70 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: submodule name -> list of public symbols.
# NOTE(review): restored from an original where the dict and list were both bound
# to `_A` while `_import_structure` (used below) was never defined, and the lazy
# module was never installed into `sys.modules`.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

# Modeling symbols are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 167 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: submodule name -> list of public symbols.
# NOTE(review): in the original every optional block *reassigned* `_A`,
# clobbering the dict instead of adding keyed entries, and the lazy module was
# never installed into `sys.modules`; restored to the canonical pattern.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 167 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)  # NOTE(review): restored name; was clobbered by the dict below

# Canonical checkpoint names (config URLs intentionally empty upstream).
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    """
    Configuration class for ImageGPT models.

    NOTE(review): reconstructed from an obfuscated original whose `__init__`
    repeated one parameter name (a SyntaxError) and never stored the values on
    the instance; names restored from the assignment order, defaults kept as found.
    `PretrainedConfig` is the base imported above.
    """

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # +1 for the start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    """
    ONNX export configuration for ImageGPT.

    NOTE(review): class/method names reconstructed from an obfuscated original
    (both classes in this chunk shared one junk name); `inputs` is the property
    `OnnxConfig` expects — confirm.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # ImageGPT consumes only token ids (pixel clusters), batch x sequence.
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=32,
        image_height=32,
    ):
        """Build dummy pixel inputs via the image processor for ONNX tracing.

        NOTE(review): parameter order assumed from the OnnxConfig convention — verify.
        """
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 149 |
def pancake_sort(arr):
    """Sort a list ascending using pancake sort (prefix reversals) and return it.

    NOTE(review): renamed from an obfuscated identifier to match the
    `pancake_sort(unsorted)` call below; the locals (`mi`, `arr` rebindings)
    were collapsed to one throwaway name, leaving `mi` undefined.

    Args:
        arr: list of comparable items (not mutated; a new list is returned).

    Returns:
        A new list with the elements of `arr` in ascending order.
    """
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the unsorted prefix, moving the max to position cur-1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    # NOTE(review): `user_input`/`unsorted` were bound to throwaway names in the original.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 71 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __UpperCAmelCase(Dataset):
    """Dataset of pre-tokenized sequences used for LM distillation.

    Stores token-id sequences and their lengths, and cleans the data at
    construction time: over-long sequences are split, very short sequences
    and sequences dominated by the unknown token are dropped.
    """

    def __init__(self, params, data):
        # params: namespace providing max_model_input_size, mlm,
        # special_tok_ids and is_master -- TODO confirm the full set against callers.
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Consistency check: one length per sequence and lengths are correct."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than `max_model_input_size` into chunks,
        re-adding the CLS/BOS and SEP/EOS markers at chunk boundaries."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices)} too long sequences.')

        def divide_chunks(l, n):
            # Slice l into consecutive chunks of size n (last one may be shorter).
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room for the two boundary special tokens.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(s) for s in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')

    def remove_unknown_sequences(self):
        """Drop sequences where at least 50% of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')

    def print_statistics(self):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate (token_ids, length) pairs into padded tensors.

        Returns:
            tk_t: LongTensor (bs, max_seq_len_) of padded token ids.
            lg_t: LongTensor (bs) of original sequence lengths.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Pad to the longest sequence in the batch.
        max_seq_len_ = max(lengths)

        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']

        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 160 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class __UpperCAmelCase(ProcessorMixin):
    """Processor wrapping a SpeechT5 feature extractor and tokenizer.

    Dispatches audio inputs to the feature extractor and text inputs to the
    tokenizer; targets are packed into `labels` (and `decoder_attention_mask`
    when available) on the returned inputs.
    """

    # Attribute names ProcessorMixin uses to instantiate the sub-components.
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Process `audio`/`text` inputs and/or `audio_target`/`text_target` targets."""
        audio = kwargs.pop('audio', None)
        text = kwargs.pop('text', None)
        text_target = kwargs.pop('text_target', None)
        audio_target = kwargs.pop('audio_target', None)
        sampling_rate = kwargs.pop('sampling_rate', None)

        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?')
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?')
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets['input_values']
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets['input_ids']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Pad previously-processed `input_values`/`input_ids` and/or `labels` into a batch."""
        input_values = kwargs.pop('input_values', None)
        input_ids = kwargs.pop('input_ids', None)
        labels = kwargs.pop('labels', None)

        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.')

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                # Token labels: pad with the tokenizer.
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets['input_ids']
            else:
                # Spectrogram labels: temporarily switch the feature extractor
                # to mel-bin feature size so padding matches the label shape.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['input_values']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
| 160 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the PLBart package: maps submodule name to the
# public names it exports. Entries are added below only when the optional
# backend they require is available.
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Tokenizer requires sentencepiece.
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code requires torch.
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules are only imported
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 314 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
# Module-level logger used by the evaluation/scoring helpers below.
logger = logging.getLogger(__name__)

logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    """Infer the RAG model type from the checkpoint name.

    Returns "rag_token", "rag_sequence", "bart", or None if no hint is found.
    """
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best `metric_fn` score of `prediction` against any ground truth."""
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    """Compute and log exact-match and F1 between predictions and gold answers.

    `preds_path` holds one prediction per line. The gold file is either a
    tab-separated question/answer-list file (`args.gold_data_mode == "qa"`)
    or a plain answer-per-line file. Metrics are logged, not returned.
    """
    hypos = [line.strip() for line in open(preds_path, 'r').readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep='\t', header=None)
        for answer_list in data[1]:
            # Each cell is a Python-literal list of acceptable answers.
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, 'r').readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths)

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f'F1: {fa:.2f}')
    logger.info(f'EM: {em:.2f}')
def get_precision_at_k(args, preds_path, gold_data_path):
    """Compute and log precision@k over tab-separated provenance predictions."""
    k = args.k
    hypos = [line.strip() for line in open(preds_path, 'r').readlines()]
    references = [line.strip() for line in open(gold_data_path, 'r').readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        # Keep only the top-k predicted provenance titles per line.
        hypo_provenance = set(hypo.split('\t')[:k])
        ref_provenance = set(reference.split('\t'))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f'Precision@{k}: {em: .2f}')
def evaluate_batch_retrieval(args, rag_model, questions):
    """Retrieve documents for a batch of questions.

    Returns one tab-joined string of retrieved (de-quoted) titles per question.
    """

    def strip_title(title):
        # Titles in the index may carry surrounding double quotes.
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors='pt', padding=True, truncation=True,
    )['input_ids'].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors='pt',
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs['title']]
        provenance_strings.append('\t'.join(provenance))
    return provenance_strings
def evaluate_batch_eae(args, rag_model, questions):
    """Generate answers end-to-end for a batch of questions and return them."""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors='pt', padding=True, truncation=True)

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens; block them
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info('Q: {} - A: {}'.format(q, a))

        return answers
def get_args():
    """Build and parse the command-line arguments for RAG evaluation.

    Also attaches the selected torch device to the returned namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_type',
        choices=['rag_sequence', 'rag_token', 'bart'],
        type=str,
        help=(
            'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
            ' model_name_or_path'
        ),
    )
    parser.add_argument(
        '--index_name',
        default=None,
        choices=['exact', 'compressed', 'legacy'],
        type=str,
        help='RAG model retriever type',
    )
    parser.add_argument(
        '--index_path',
        default=None,
        type=str,
        help='Path to the retrieval index',
    )
    parser.add_argument('--n_docs', default=5, type=int, help='Number of retrieved docs')
    parser.add_argument(
        '--model_name_or_path',
        default=None,
        type=str,
        required=True,
        help='Path to pretrained checkpoints or model identifier from huggingface.co/models',
    )
    parser.add_argument(
        '--eval_mode',
        choices=['e2e', 'retrieval'],
        default='e2e',
        type=str,
        help=(
            'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
            ' precision@k.'
        ),
    )
    parser.add_argument('--k', default=1, type=int, help='k for the precision@k calculation')
    parser.add_argument(
        '--evaluation_set',
        default=None,
        type=str,
        required=True,
        help='Path to a file containing evaluation samples',
    )
    parser.add_argument(
        '--gold_data_path',
        default=None,
        type=str,
        required=True,
        help='Path to a tab-separated file with gold samples',
    )
    parser.add_argument(
        '--gold_data_mode',
        default='qa',
        type=str,
        choices=['qa', 'ans'],
        help=(
            'Format of the gold data file'
            'qa - a single line in the following format: question [tab] answer_list'
            'ans - a single line of the gold file contains the expected answer string'
        ),
    )
    parser.add_argument(
        '--predictions_path',
        type=str,
        default='predictions.txt',
        help='Name of the predictions file, to be stored in the checkpoints directory',
    )
    parser.add_argument(
        '--eval_all_checkpoints',
        action='store_true',
        help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number',
    )
    parser.add_argument(
        '--eval_batch_size',
        default=8,
        type=int,
        help='Batch size per GPU/CPU for evaluation.',
    )
    parser.add_argument(
        '--recalculate',
        help='Recalculate predictions even if the prediction file exists',
        action='store_true',
    )
    parser.add_argument(
        '--num_beams',
        default=4,
        type=int,
        help='Number of beams to be used when generating answers',
    )
    parser.add_argument('--min_length', default=1, type=int, help='Min length of the generated answers')
    parser.add_argument('--max_length', default=50, type=int, help='Max length of the generated answers')
    parser.add_argument(
        '--print_predictions',
        action='store_true',
        help='If True, prints predictions while evaluating.',
    )
    parser.add_argument(
        '--print_docs',
        action='store_true',
        help='If True, prints docs retried while generating.',
    )
    args = parser.parse_args()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    return args
def main(args):
    """Run evaluation for each requested checkpoint and score the predictions."""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith('rag'):
        model_class = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
        model_kwargs['n_docs'] = args.n_docs
        if args.index_name is not None:
            model_kwargs['index_name'] = args.index_name
        if args.index_path is not None:
            model_kwargs['index_path'] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    # Either evaluate every checkpoint directory under the model path, or
    # just the given path.
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info('Evaluate the following checkpoints: %s', checkpoints)

    score_fn = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            # Reuse an existing predictions file unless asked to recompute.
            logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info('***** Running evaluation for {} *****'.format(checkpoint))
        logger.info('  Batch size = %d', args.eval_batch_size)
        logger.info('  Predictions will be stored under {}'.format(args.predictions_path))

        if args.model_type.startswith('rag'):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, 'r') as eval_file, open(args.predictions_path, 'w') as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write('\n'.join(answers) + '\n')
                    preds_file.flush()
                    questions = []
            # Flush the final, possibly-partial batch.
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write('\n'.join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    # Parse CLI arguments and run the evaluation loop.
    args = get_args()
    main(args)
| 314 | 1 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Parse extra CLI tokens into a {flag-name: value} dict.

    Assumes flags and values strictly alternate, e.g.
    ["--num_proc", "4"] -> {"num_proc": "4"}.
    """
    return {key.lstrip('-'): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    """Entry point for `datasets-cli`: register subcommands, parse args, run."""
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=False)
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers')
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()

    if not hasattr(args, 'func'):
        # No subcommand selected: show help and fail.
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
# Script entry point: dispatch to the selected datasets-cli subcommand.
if __name__ == "__main__":
    main()
| 265 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_A : List[str] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE(SequenceFeatureExtractor):
    """Feature extractor converting raw audio into padded log-mel spectrogram
    batches (`audio_values`) plus an `audio_mask` patch attention mask."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],  # [time, freq] patch dims; shared mutable default kept for interface compatibility
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of frequency patches per time step.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute a normalized log-mel spectrogram for one waveform."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, 'hann'),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='dB',
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        # Scale dB values into [-1, 1].
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,   # accepted for API compatibility; not used here
        mask_audio=False,  # accepted for API compatibility; not used here
        **kwargs,
    ):
        """Featurize raw audio into a padded batch of log-mel spectrograms.

        Returns a BatchFeature with `audio_values` (bs, 1, time, feature_size)
        and, if `return_attention_mask`, an `audio_mask` over patches.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 265 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge with weight 0 or 1, as used by the 0-1-BFS graph below."""

    destination_vertex: int
    weight: int
class _A :
"""simple docstring"""
def __init__( self : List[str] , __UpperCAmelCase : int):
a : list[list[Edge]] = [[] for _ in range(__UpperCAmelCase)]
a : Union[str, Any] = size
def __getitem__( self : Tuple , __UpperCAmelCase : int):
return iter(self._graph[vertex])
@property
def __snake_case ( self : Optional[int]):
return self._size
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int):
if weight not in (0, 1):
raise ValueError("Edge weight must be either 0 or 1.")
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("Vertex indexes must be in [0; size).")
self._graph[from_vertex].append(Edge(__UpperCAmelCase , __UpperCAmelCase))
def __snake_case ( self : Any , __UpperCAmelCase : int , __UpperCAmelCase : int):
a : List[str] = deque([start_vertex])
a : list[int | None] = [None] * self.size
a : List[Any] = 0
while queue:
a : int = queue.popleft()
a : Union[str, Any] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
a : Dict = current_distance + edge.weight
a : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(__UpperCAmelCase , __UpperCAmelCase)
and new_distance >= dest_vertex_distance
):
continue
a : List[Any] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex)
else:
queue.append(edge.destination_vertex)
if distances[finish_vertex] is None:
raise ValueError("No path from start_vertex to finish_vertex.")
return distances[finish_vertex]
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 40 |
"""simple docstring"""
# Mapping of pip package name -> full pinned requirement specifier.
# Used to keep version constraints for required and optional dependencies
# in one place (e.g. when assembling setup extras).
__lowercase = {
    """Pillow""": """Pillow<10.0.0""",
    """accelerate""": """accelerate>=0.20.3""",
    """av""": """av==9.2.0""",
    """beautifulsoup4""": """beautifulsoup4""",
    """black""": """black~=23.1""",
    """codecarbon""": """codecarbon==1.2.0""",
    """cookiecutter""": """cookiecutter==1.7.3""",
    """dataclasses""": """dataclasses""",
    """datasets""": """datasets!=2.5.0""",
    """decord""": """decord==0.6.0""",
    """deepspeed""": """deepspeed>=0.9.3""",
    """diffusers""": """diffusers""",
    """dill""": """dill<0.3.5""",
    """evaluate""": """evaluate>=0.2.0""",
    """fairscale""": """fairscale>0.3""",
    """faiss-cpu""": """faiss-cpu""",
    """fastapi""": """fastapi""",
    """filelock""": """filelock""",
    """flax""": """flax>=0.4.1,<=0.7.0""",
    """ftfy""": """ftfy""",
    """fugashi""": """fugashi>=1.0""",
    """GitPython""": """GitPython<3.1.19""",
    """hf-doc-builder""": """hf-doc-builder>=0.3.0""",
    """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
    """importlib_metadata""": """importlib_metadata""",
    """ipadic""": """ipadic>=1.0.0,<2.0""",
    """isort""": """isort>=5.5.4""",
    """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
    """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
    """jieba""": """jieba""",
    """kenlm""": """kenlm""",
    """keras-nlp""": """keras-nlp>=0.3.1""",
    """librosa""": """librosa""",
    """nltk""": """nltk""",
    """natten""": """natten>=0.14.6""",
    """numpy""": """numpy>=1.17""",
    """onnxconverter-common""": """onnxconverter-common""",
    """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
    """onnxruntime""": """onnxruntime>=1.4.0""",
    """opencv-python""": """opencv-python""",
    """optuna""": """optuna""",
    """optax""": """optax>=0.0.8,<=0.1.4""",
    """packaging""": """packaging>=20.0""",
    """parameterized""": """parameterized""",
    """phonemizer""": """phonemizer""",
    """protobuf""": """protobuf""",
    """psutil""": """psutil""",
    """pyyaml""": """pyyaml>=5.1""",
    """pydantic""": """pydantic<2""",
    """pytest""": """pytest>=7.2.0""",
    """pytest-timeout""": """pytest-timeout""",
    """pytest-xdist""": """pytest-xdist""",
    """python""": """python>=3.8.0""",
    """ray[tune]""": """ray[tune]""",
    """regex""": """regex!=2019.12.17""",
    """requests""": """requests""",
    """rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
    """rjieba""": """rjieba""",
    """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
    """ruff""": """ruff>=0.0.241,<=0.0.259""",
    """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
    """sacremoses""": """sacremoses""",
    """safetensors""": """safetensors>=0.3.1""",
    """sagemaker""": """sagemaker>=2.31.0""",
    """scikit-learn""": """scikit-learn""",
    """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
    """sigopt""": """sigopt""",
    """starlette""": """starlette""",
    """sudachipy""": """sudachipy>=0.6.6""",
    """sudachidict_core""": """sudachidict_core>=20220729""",
    """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
    """tensorflow""": """tensorflow>=2.6,<2.14""",
    """tensorflow-text""": """tensorflow-text<2.14""",
    """tf2onnx""": """tf2onnx""",
    """timeout-decorator""": """timeout-decorator""",
    """timm""": """timm""",
    """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
    """torch""": """torch>=1.9,!=1.12.0""",
    """torchaudio""": """torchaudio""",
    """torchvision""": """torchvision""",
    """pyctcdecode""": """pyctcdecode>=0.4.0""",
    """tqdm""": """tqdm>=4.27""",
    """unidic""": """unidic>=1.0.2""",
    """unidic_lite""": """unidic_lite>=1.0.7""",
    """urllib3""": """urllib3<2.0.0""",
    """uvicorn""": """uvicorn""",
}
| 40 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy import map: submodule name -> public names it provides. Heavy submodules
# (torch/tf models) are only imported on first attribute access via _LazyModule,
# or eagerly below under TYPE_CHECKING so static type checkers still see them.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Optional dependency missing: simply don't expose the fast tokenizer.
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so submodules are only
    # imported when first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( snake_case_ ):
    """Count the trainable parameters of a torch module.

    Args:
        snake_case_: a ``torch.nn.Module`` (obfuscated name kept so positional
            callers keep working).

    Returns:
        int: total number of elements across all parameters with
        ``requires_grad=True``.
    """
    # Only parameters that participate in backprop count as trainable.
    trainable_params = filter(lambda p: p.requires_grad, snake_case_.parameters())
    return int(sum(np.prod(p.size()) for p in trainable_params))
_snake_case = logging.getLogger(__name__)
def lowerCAmelCase_ ( output_dir, metric ):
    """Build a ``ModelCheckpoint`` callback that keeps the top-3 checkpoints
    ranked by ``val_<metric>`` (higher is better).

    Args:
        output_dir: directory checkpoints are written to.
        metric: one of ``"rouge2"``, ``"bleu"`` or ``"em"``.

    Raises:
        NotImplementedError: for any other metric name.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            """ function.""" )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f'''val_{metric}''',
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def lowerCAmelCase_ ( metric, patience ):
    """Build an ``EarlyStopping`` callback watching ``val_<metric>``.

    ``mode`` is ``"min"`` for loss-like metrics (name contains ``"loss"``) and
    ``"max"`` for score-like metrics.
    """
    return EarlyStopping(
        monitor=f'''val_{metric}''',
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class lowercase ( pl.Callback ):
    """PyTorch-Lightning callback that logs learning rates, dumps eval
    results/generations to disk, and logs parameter counts.

    NOTE(review): this block is from an obfuscated dump. Every method is named
    ``a__`` (later defs overwrite earlier ones) and several methods declare
    duplicate ``_a`` parameters, which is a SyntaxError; the bodies also read
    names (``pl_module``, ``trainer``, ``metrics``, ``od``, ``val``,
    ``count_trainable_parameters``, ``_write_logs``) that are never bound here.
    Restore the original identifiers before using this class.
    """

    def a__ ( self , _a , _a ) -> Optional[Any]:
        # Log the current learning rate of every optimizer param group.
        _A : List[Any] = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(_a )

    @rank_zero_only
    def a__ ( self , _a , _a , _a , _a=True ) -> None:
        # Write scalar metrics (and optionally generations) for one eval pass.
        logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
        _A : int = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
        # Log results
        _A : Dict = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            _A : List[Any] = od / """test_results.txt"""
            _A : List[Any] = od / """test_generations.txt"""
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            _A : Optional[int] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
            _A : int = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=_a )
        generations_file.parent.mkdir(exist_ok=_a )
        # Append scalar metrics; tensors are unwrapped to Python floats first.
        with open(_a , """a+""" ) as writer:
            for key in sorted(_a ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                _A : List[Any] = metrics[key]
                if isinstance(_a , torch.Tensor ):
                    _A : str = val.item()
                _A : str = F'''{key}: {val:.6f}\n'''
                writer.write(_a )
        if not save_generations:
            return
        if "preds" in metrics:
            _A : List[Any] = """\n""".join(metrics["""preds"""] )
            generations_file.open("""w+""" ).write(_a )

    @rank_zero_only
    def a__ ( self , _a , _a ) -> str:
        # Log total and trainable parameter counts (in millions) at train start.
        try:
            _A : int = pl_module.model.model.num_parameters()
        except AttributeError:
            _A : str = pl_module.model.num_parameters()
        _A : Optional[int] = count_trainable_parameters(_a )
        # mp stands for million parameters
        trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )

    @rank_zero_only
    def a__ ( self , _a , _a ) -> Optional[int]:
        # On test end: persist metrics json and write test results to disk.
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(_a , _a , """test""" )

    @rank_zero_only
    def a__ ( self , _a , _a ) -> Tuple:
        # On validation end: persist metrics json only.
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 343 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
# Map of canonical BERT checkpoint names to their hosted config.json URLs.
# NOTE(review): this dict re-binds ``A`` and therefore shadows the logger
# created on the previous line — the two should have distinct names.
A = {
    'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
    'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
    'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
    'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
    'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
    'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
    'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
    'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
    'bert-large-uncased-whole-word-masking': (
        'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
    ),
    'bert-large-cased-whole-word-masking': (
        'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
    ),
    'bert-large-uncased-whole-word-masking-finetuned-squad': (
        'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
    ),
    'bert-large-cased-whole-word-masking-finetuned-squad': (
        'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
    ),
    'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
    'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
    'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
    'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
    'cl-tohoku/bert-base-japanese-whole-word-masking': (
        'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
    ),
    'cl-tohoku/bert-base-japanese-char': (
        'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
    ),
    'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
        'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
    ),
    'TurkuNLP/bert-base-finnish-cased-v1': (
        'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
    ),
    'TurkuNLP/bert-base-finnish-uncased-v1': (
        'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
    ),
    'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
    # See all BERT models at https://huggingface.co/models?filter=bert
}
class __lowercase ( SCREAMING_SNAKE_CASE_ ):
    """BERT model configuration (mirrors ``transformers.BertConfig``).

    The original dump declared every ``__init__`` parameter as
    ``_UpperCAmelCase`` (a SyntaxError: duplicate argument names) and assigned
    the values to locals instead of ``self``; canonical BERT parameter names
    and attribute assignments are restored here. Defaults match the visible
    originals (vocab_size=30522, hidden_size=768, ...).
    """

    __lowerCAmelCase = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __lowercase ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration for BERT: declares which input axes are
    dynamic (batch/sequence, plus choice for multiple-choice heads).

    Fixes from the dump: the axis dict was assigned to a throwaway local while
    the return expression read an undefined ``dynamic_axis``, and residue text
    was fused onto the closing line.
    """

    @property
    def _lowerCamelCase ( self ):
        # Multiple-choice inputs carry an extra "choice" axis between batch
        # and sequence; all other tasks use (batch, sequence).
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
    """Unit tests for ``BarkProcessor`` (tokenizer wrapping + voice presets).

    NOTE(review): obfuscated dump — every method is named ``__A``, so later
    definitions overwrite earlier ones and unittest will only discover the
    last one. The setUp-like method also assigns fixtures to a throwaway
    local ``_lowerCAmelCase`` instead of ``self.checkpoint`` etc., which the
    other methods read; restore the original names before trusting this class.
    """

    def __A ( self ):
        # Fixture set-up: checkpoint id, temp dir, voice preset and inputs.
        _lowerCAmelCase : Optional[int] = """ylacombe/bark-small"""
        _lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
        _lowerCAmelCase : int = """en_speaker_1"""
        _lowerCAmelCase : List[Any] = """This is a test string"""
        _lowerCAmelCase : Any = """speaker_embeddings_path.json"""
        _lowerCAmelCase : List[Any] = """speaker_embeddings"""

    def __A ( self , **a__ ):
        # Helper: tokenizer for the test checkpoint, forwarding kwargs.
        return AutoTokenizer.from_pretrained(self.checkpoint , **a__ )

    def __A ( self ):
        # Tear-down: remove the temp dir created in set-up.
        shutil.rmtree(self.tmpdirname )

    def __A ( self ):
        # Round-trip save/load of a default processor preserves the vocab.
        _lowerCAmelCase : List[Any] = self.get_tokenizer()
        _lowerCAmelCase : int = BarkProcessor(tokenizer=a__ )
        processor.save_pretrained(self.tmpdirname )
        _lowerCAmelCase : str = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def __A ( self ):
        # Save/load with speaker-embedding paths and extra tokenizer kwargs.
        _lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        _lowerCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        _lowerCAmelCase : List[Any] = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def __A ( self ):
        # Voice presets: accepted as dict, as an .npz file, and by hub name.
        _lowerCAmelCase : List[str] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        _lowerCAmelCase : Union[str, Any] = 35
        _lowerCAmelCase : Union[str, Any] = 2
        _lowerCAmelCase : Optional[int] = 8
        _lowerCAmelCase : Dict = {
            """semantic_prompt""": np.ones(a__ ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        _lowerCAmelCase : Dict = processor(text=self.input_string , voice_preset=a__ )
        _lowerCAmelCase : Tuple = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        _lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(a__ , **a__ )
        _lowerCAmelCase : List[Any] = processor(text=self.input_string , voice_preset=a__ )
        _lowerCAmelCase : Optional[int] = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        _lowerCAmelCase : str = processor(text=self.input_string , voice_preset=self.voice_preset )

    def __A ( self ):
        # Processor text path must match plain tokenizer output token-for-token.
        _lowerCAmelCase : int = self.get_tokenizer()
        _lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=a__ )
        _lowerCAmelCase : Dict = processor(text=self.input_string )
        _lowerCAmelCase : Tuple = tokenizer(
            self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=a__ , return_attention_mask=a__ , return_token_type_ids=a__ , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 44 | 0 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class a :
    """Mixin with shared fixtures/checks for DeepFloyd-IF pipeline tests.

    NOTE(review): obfuscated dump — all four methods are named
    ``lowerCAmelCase_`` (later definitions overwrite earlier ones) and every
    local is assigned to the same throwaway name ``_UpperCAmelCase`` while the
    bodies read the original identifiers (``text_encoder``, ``unet``,
    ``inputs``, ``prompt_embeds``, ...). It also references ``__lowerCAmelCase``
    which is never bound, and ``self.pipeline_class`` / ``self.get_dummy_inputs``
    which a concrete test class is presumably expected to provide — restore the
    original names before relying on this mixin.
    """

    def lowerCAmelCase_ ( self : Dict ):
        # Build a dict of tiny dummy components for a stage-1 IF pipeline.
        # Seeds are reset before each component for reproducibility.
        torch.manual_seed(0 )
        _UpperCAmelCase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        _UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        _UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                """ResnetDownsampleBlock2D""",
                """SimpleCrossAttnDownBlock2D""",
            ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        _UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
        torch.manual_seed(0 )
        _UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def lowerCAmelCase_ ( self : int ):
        # Build dummy components for a stage-2 (super-resolution) IF pipeline;
        # note in_channels=6 and the extra image-noising scheduler.
        torch.manual_seed(0 )
        _UpperCAmelCase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        _UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        _UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                """ResnetDownsampleBlock2D""",
                """SimpleCrossAttnDownBlock2D""",
            ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        _UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
        torch.manual_seed(0 )
        _UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        _UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def lowerCAmelCase_ ( self : str ):
        # Check save/load round-trip when optional components are set to None
        # and the prompt is pre-encoded into embeddings.
        _UpperCAmelCase = self.get_dummy_components()
        _UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
        _UpperCAmelCase = inputs["""prompt"""]
        _UpperCAmelCase = inputs["""generator"""]
        _UpperCAmelCase = inputs["""num_inference_steps"""]
        _UpperCAmelCase = inputs["""output_type"""]
        if "image" in inputs:
            _UpperCAmelCase = inputs["""image"""]
        else:
            _UpperCAmelCase = None
        if "mask_image" in inputs:
            _UpperCAmelCase = inputs["""mask_image"""]
        else:
            _UpperCAmelCase = None
        if "original_image" in inputs:
            _UpperCAmelCase = inputs["""original_image"""]
        else:
            _UpperCAmelCase = None
        _UpperCAmelCase , _UpperCAmelCase = pipe.encode_prompt(__lowerCAmelCase )
        # inputs with prompt converted to embeddings
        _UpperCAmelCase = {
            """prompt_embeds""": prompt_embeds,
            """negative_prompt_embeds""": negative_prompt_embeds,
            """generator""": generator,
            """num_inference_steps""": num_inference_steps,
            """output_type""": output_type,
        }
        if image is not None:
            _UpperCAmelCase = image
        if mask_image is not None:
            _UpperCAmelCase = mask_image
        if original_image is not None:
            _UpperCAmelCase = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        _UpperCAmelCase = pipe(**__lowerCAmelCase )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(__lowerCAmelCase )
            _UpperCAmelCase = self.pipeline_class.from_pretrained(__lowerCAmelCase )
            pipe_loaded.to(__lowerCAmelCase )
            pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(__lowerCAmelCase , __lowerCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
        _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
        _UpperCAmelCase = inputs["""generator"""]
        _UpperCAmelCase = inputs["""num_inference_steps"""]
        _UpperCAmelCase = inputs["""output_type"""]
        # inputs with prompt converted to embeddings
        _UpperCAmelCase = {
            """prompt_embeds""": prompt_embeds,
            """negative_prompt_embeds""": negative_prompt_embeds,
            """generator""": generator,
            """num_inference_steps""": num_inference_steps,
            """output_type""": output_type,
        }
        if image is not None:
            _UpperCAmelCase = image
        if mask_image is not None:
            _UpperCAmelCase = mask_image
        if original_image is not None:
            _UpperCAmelCase = original_image
        _UpperCAmelCase = pipe_loaded(**__lowerCAmelCase )[0]
        # Outputs before and after the round-trip must agree to 1e-4.
        _UpperCAmelCase = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max()
        self.assertLess(__lowerCAmelCase , 1e-4 )

    def lowerCAmelCase_ ( self : Any ):
        # Check plain save/load round-trip keeps pipeline outputs within 1e-4.
        _UpperCAmelCase = self.get_dummy_components()
        _UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
        _UpperCAmelCase = pipe(**__lowerCAmelCase )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(__lowerCAmelCase )
            _UpperCAmelCase = self.pipeline_class.from_pretrained(__lowerCAmelCase )
            pipe_loaded.to(__lowerCAmelCase )
            pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
        _UpperCAmelCase = pipe_loaded(**__lowerCAmelCase )[0]
        _UpperCAmelCase = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max()
        self.assertLess(__lowerCAmelCase , 1e-4 )
| 30 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
    """UPerNet model configuration (mirrors ``transformers.UperNetConfig``).

    The original dump declared every ``__init__`` parameter as
    ``__lowerCAmelCase`` (a SyntaxError: duplicate argument names) and dropped
    the ``self.`` attribute assignments; canonical parameter names and
    attributes are restored here, with the defaults visible in the original
    signature.
    """

    _snake_case : List[str] = 'upernet'

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # read-only default; mirrors upstream API
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
        elif isinstance(backbone_config, dict):
            # A plain dict: instantiate the matching backbone config class.
            backbone_model_type = backbone_config.get("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def lowerCAmelCase_ ( self : List[Any] ):
        """Serialize this config (and the nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 30 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( __lowerCAmelCase )-> list:
    '''Sort a list of integers in place with pigeonhole sort and return it.

    Works for any integers (negative values included); an empty list is
    returned unchanged.

    >>> lowerCAmelCase_([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    '''
    if len(__lowerCAmelCase ) == 0:
        return __lowerCAmelCase
    _min, _max = min(__lowerCAmelCase ), max(__lowerCAmelCase )
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for value in __lowerCAmelCase:
        index = value - _min  # offset into the holes array
        holes[index] = value
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    pos = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            __lowerCAmelCase[pos] = holes[i]
            pos += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return __lowerCAmelCase
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive demo: parse comma-separated integers and print them sorted.
    # (The dump referenced undefined names `user_input`, `unsorted` and
    # `pigeon_sort`; wired to the names this file actually defines.)
    __snake_case = input('''Enter numbers separated by comma:\n''')
    __snake_case = [int(x) for x in __snake_case.split(''',''')]
    print(lowerCAmelCase_(__snake_case))
| 348 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Unit tests for ``InstructBlipProcessor`` (image processor + two tokenizers).

    NOTE(review): obfuscated dump — every method is named ``_lowerCamelCase``,
    so later definitions overwrite earlier ones and unittest will only
    discover the last one. The set-up method also assigns its fixtures to a
    throwaway ``__lowercase`` local instead of ``self.tmpdirname`` which the
    helpers below read; restore the original names before trusting coverage.
    """

    def _lowerCamelCase ( self ) -> Union[str, Any]:
        # Fixture set-up: build and persist a processor into a temp dir.
        __lowercase : Dict = tempfile.mkdtemp()
        __lowercase : Any = BlipImageProcessor()
        __lowercase : Optional[int] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        __lowercase : str = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        __lowercase : str = InstructBlipProcessor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        processor.save_pretrained(self.tmpdirname )

    def _lowerCamelCase ( self , **UpperCamelCase_ ) -> Any:
        # Helper: main (GPT-2) tokenizer of the saved processor.
        return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).tokenizer

    def _lowerCamelCase ( self , **UpperCamelCase_ ) -> List[str]:
        # Helper: image processor of the saved processor.
        return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).image_processor

    def _lowerCamelCase ( self , **UpperCamelCase_ ) -> List[Any]:
        # Helper: Q-Former (BERT) tokenizer of the saved processor.
        return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).qformer_tokenizer

    def _lowerCamelCase ( self ) -> Tuple:
        # Tear-down: remove the temp dir.
        shutil.rmtree(self.tmpdirname )

    def _lowerCamelCase ( self ) -> Any:
        # Helper: one random RGB PIL image as processor input.
        __lowercase : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowercase : Any = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def _lowerCamelCase ( self ) -> str:
        # Save/load round-trip with extra tokenizer/image-processor kwargs.
        __lowercase : Any = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        __lowercase : List[str] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __lowercase : Dict = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
        __lowercase : int = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
        self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase_ )

    def _lowerCamelCase ( self ) -> Any:
        # Image path of the processor must match the bare image processor.
        __lowercase : Any = self.get_image_processor()
        __lowercase : str = self.get_tokenizer()
        __lowercase : Any = self.get_qformer_tokenizer()
        __lowercase : List[str] = InstructBlipProcessor(
            tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ )
        __lowercase : int = self.prepare_image_inputs()
        __lowercase : Union[str, Any] = image_processor(UpperCamelCase_ , return_tensors='''np''' )
        __lowercase : Tuple = processor(images=UpperCamelCase_ , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def _lowerCamelCase ( self ) -> str:
        # Text path must match both tokenizers (main + qformer_ prefixed keys).
        __lowercase : str = self.get_image_processor()
        __lowercase : Dict = self.get_tokenizer()
        __lowercase : Optional[Any] = self.get_qformer_tokenizer()
        __lowercase : List[str] = InstructBlipProcessor(
            tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ )
        __lowercase : Dict = '''lower newer'''
        __lowercase : int = processor(text=UpperCamelCase_ )
        __lowercase : List[str] = tokenizer(UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
        __lowercase : Union[str, Any] = qformer_tokenizer(UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )

    def _lowerCamelCase ( self ) -> List[str]:
        # Text+image call exposes the expected output keys; empty call raises.
        __lowercase : Union[str, Any] = self.get_image_processor()
        __lowercase : Union[str, Any] = self.get_tokenizer()
        __lowercase : Optional[int] = self.get_qformer_tokenizer()
        __lowercase : List[str] = InstructBlipProcessor(
            tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ )
        __lowercase : Optional[int] = '''lower newer'''
        __lowercase : Any = self.prepare_image_inputs()
        __lowercase : List[Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
        self.assertListEqual(
            list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
        # test if it raises when no input is passed
        with pytest.raises(UpperCamelCase_ ):
            processor()

    def _lowerCamelCase ( self ) -> Dict:
        # batch_decode must defer to the main tokenizer.
        __lowercase : Any = self.get_image_processor()
        __lowercase : List[str] = self.get_tokenizer()
        __lowercase : Any = self.get_qformer_tokenizer()
        __lowercase : Tuple = InstructBlipProcessor(
            tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ )
        __lowercase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowercase : List[str] = processor.batch_decode(UpperCamelCase_ )
        __lowercase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )

    def _lowerCamelCase ( self ) -> List[str]:
        # Duplicate of the text+image key check above (dump artifact).
        __lowercase : List[str] = self.get_image_processor()
        __lowercase : List[str] = self.get_tokenizer()
        __lowercase : List[Any] = self.get_qformer_tokenizer()
        __lowercase : Optional[Any] = InstructBlipProcessor(
            tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ )
        __lowercase : Any = '''lower newer'''
        __lowercase : Union[str, Any] = self.prepare_image_inputs()
        __lowercase : Union[str, Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
        self.assertListEqual(
            list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 249 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCamelCase ( a__ , a__ , unittest.TestCase ):
    '''Unit tests for the ``AutoencoderKL`` model (dummy-sized configs).

    NOTE(review): this file was mechanically obfuscated.  Throughout the
    class, assignments bind throwaway names (``SCREAMING_SNAKE_CASE`` three
    times in a row, ``UpperCAmelCase__`` for locals) while subsequent lines
    read the original variable names (``batch_size``, ``init_dict``,
    ``model`` ...) -- as written most methods raise ``NameError`` at
    runtime, and all test methods share the name ``_a`` so only the last
    definition survives on the class.  The comments below describe the
    intended behaviour.
    '''
    # Intended class attributes: model_class, main_input_name, base tolerance.
    SCREAMING_SNAKE_CASE = AutoencoderKL
    SCREAMING_SNAKE_CASE = 'sample'
    SCREAMING_SNAKE_CASE = 1e-2

    @property
    def _a (self ):
        """Return a dummy ``{"sample": tensor}`` input batch of shape (4, 3, 32, 32).

        NOTE(review): ``batch_size``/``num_channels``/``sizes``/``image`` are
        read but never bound (the assignments target ``UpperCAmelCase__``).
        """
        UpperCAmelCase__ : Tuple = 4
        UpperCAmelCase__ : Any = 3
        UpperCAmelCase__ : Tuple = (32, 32)
        UpperCAmelCase__ : Any = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCamelCase )
        return {"sample": image}

    @property
    def _a (self ):
        """Expected model input shape (C, H, W)."""
        return (3, 32, 32)

    @property
    def _a (self ):
        """Expected model output shape (C, H, W)."""
        return (3, 32, 32)

    def _a (self ):
        """Build a minimal AutoencoderKL config plus a matching dummy input."""
        UpperCAmelCase__ : List[Any] = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        UpperCAmelCase__ : Tuple = self.dummy_input
        return init_dict, inputs_dict

    def _a (self ):
        """Intentionally skipped (signature covered by the mixin)."""
        pass

    def _a (self ):
        """Intentionally skipped (training covered by the mixin)."""
        pass

    @unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
    def _a (self ):
        """Gradient checkpointing must not change the loss or parameter grads."""
        UpperCAmelCase__ : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
        UpperCAmelCase__ : Optional[int] = self.model_class(**_lowerCamelCase )
        model.to(_lowerCamelCase )
        assert not model.is_gradient_checkpointing and model.training
        UpperCAmelCase__ : Any = model(**_lowerCamelCase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        UpperCAmelCase__ : List[Any] = torch.randn_like(_lowerCamelCase )
        UpperCAmelCase__ : Any = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        UpperCAmelCase__ : Tuple = self.model_class(**_lowerCamelCase )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(_lowerCamelCase )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        UpperCAmelCase__ : Optional[int] = model_a(**_lowerCamelCase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        UpperCAmelCase__ : int = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5 )
        UpperCAmelCase__ : List[str] = dict(model.named_parameters() )
        UpperCAmelCase__ : str = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )

    def _a (self ):
        """Loading the dummy hub checkpoint must report no missing keys."""
        UpperCAmelCase__ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=_lowerCamelCase )
        self.assertIsNotNone(_lowerCamelCase )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(_lowerCamelCase )
        UpperCAmelCase__ : Any = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"

    def _a (self ):
        """Seeded forward pass through the dummy checkpoint must match the
        per-device expected output slice."""
        UpperCAmelCase__ : Tuple = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
        UpperCAmelCase__ : List[Any] = model.to(_lowerCamelCase )
        model.eval()
        # MPS does not support device-bound generators.
        if torch_device == "mps":
            UpperCAmelCase__ : Optional[int] = torch.manual_seed(0 )
        else:
            UpperCAmelCase__ : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        UpperCAmelCase__ : Dict = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        UpperCAmelCase__ : List[Any] = image.to(_lowerCamelCase )
        with torch.no_grad():
            UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase , sample_posterior=_lowerCamelCase , generator=_lowerCamelCase ).sample
        UpperCAmelCase__ : Optional[Any] = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            UpperCAmelCase__ : List[Any] = torch.tensor(
                [
                    -4.00_78e-01,
                    -3.83_23e-04,
                    -1.26_81e-01,
                    -1.14_62e-01,
                    2.00_95e-01,
                    1.08_93e-01,
                    -8.82_47e-02,
                    -3.03_61e-01,
                    -9.86_44e-03,
                ] )
        elif torch_device == "cpu":
            UpperCAmelCase__ : str = torch.tensor(
                [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
        else:
            UpperCAmelCase__ : Optional[int] = torch.tensor(
                [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
        self.assertTrue(torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1e-2 ) )
@slow
class lowerCamelCase ( unittest.TestCase ):
    '''Integration tests comparing SD-v1-4 VAE outputs against recorded slices.

    NOTE(review): mechanically obfuscated.  All test methods share the name
    ``_a`` (only the last definition survives), several ``def`` lines repeat
    the same parameter name (``_lowerCamelCase`` twice or more), which is a
    ``SyntaxError`` in Python, and bodies read original variable names
    (``seed``, ``fpaa``, ``model`` ...) that are never bound.  ``torch.floataa``
    does not exist -- presumably ``torch.float16`` / ``torch.float32``
    (fp16 vs fp32); confirm against the upstream file.  Comments below
    describe the intended behaviour.
    '''
    def _a (self , _lowerCamelCase , _lowerCamelCase ):
        """Name of the cached gaussian-noise fixture for (seed, shape).

        NOTE(review): the comprehension iterates ``s`` but converts
        ``_lowerCamelCase`` -- should presumably be ``str(s)``.
        """
        return F"""gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCamelCase) for s in shape] )}.npy"""

    def _a (self ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _a (self , _lowerCamelCase=0 , _lowerCamelCase=(4, 3, 512, 512) , _lowerCamelCase=False ):
        """Load the seeded noise fixture as a (fp16 or fp32) tensor on device."""
        UpperCAmelCase__ : Any = torch.floataa if fpaa else torch.floataa
        UpperCAmelCase__ : Optional[int] = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCamelCase , _lowerCamelCase ) ) ).to(_lowerCamelCase ).to(_lowerCamelCase )
        return image

    def _a (self , _lowerCamelCase="CompVis/stable-diffusion-v1-4" , _lowerCamelCase=False ):
        """Load the SD VAE submodel in eval mode (optionally the fp16 revision)."""
        UpperCAmelCase__ : Dict = "fp16" if fpaa else None
        UpperCAmelCase__ : Tuple = torch.floataa if fpaa else torch.floataa
        UpperCAmelCase__ : Dict = AutoencoderKL.from_pretrained(
            _lowerCamelCase , subfolder="""vae""" , torch_dtype=_lowerCamelCase , revision=_lowerCamelCase , )
        model.to(_lowerCamelCase ).eval()
        return model

    def _a (self , _lowerCamelCase=0 ):
        """Seeded RNG; MPS has no device-bound generator."""
        if torch_device == "mps":
            return torch.manual_seed(_lowerCamelCase )
        return torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
            [47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
            # fmt: on
        ] )
    def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
        """fp32 posterior sample must match the recorded slice (CPU/GPU vs MPS)."""
        UpperCAmelCase__ : str = self.get_sd_vae_model()
        UpperCAmelCase__ : int = self.get_sd_image(_lowerCamelCase )
        UpperCAmelCase__ : Optional[Any] = self.get_generator(_lowerCamelCase )
        with torch.no_grad():
            UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample
        assert sample.shape == image.shape
        UpperCAmelCase__ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        UpperCAmelCase__ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
            [47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def _a (self , _lowerCamelCase , _lowerCamelCase ):
        """fp16 posterior sample must match the recorded slice (looser tol)."""
        UpperCAmelCase__ : List[str] = self.get_sd_vae_model(fpaa=_lowerCamelCase )
        UpperCAmelCase__ : Union[str, Any] = self.get_sd_image(_lowerCamelCase , fpaa=_lowerCamelCase )
        UpperCAmelCase__ : int = self.get_generator(_lowerCamelCase )
        with torch.no_grad():
            UpperCAmelCase__ : Any = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample
        assert sample.shape == image.shape
        UpperCAmelCase__ : List[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        UpperCAmelCase__ : int = torch.tensor(_lowerCamelCase )
        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
            [47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
            # fmt: on
        ] )
    def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
        """Deterministic (mode) forward pass must match the recorded slice."""
        UpperCAmelCase__ : Optional[Any] = self.get_sd_vae_model()
        UpperCAmelCase__ : Tuple = self.get_sd_image(_lowerCamelCase )
        with torch.no_grad():
            UpperCAmelCase__ : Dict = model(_lowerCamelCase ).sample
        assert sample.shape == image.shape
        UpperCAmelCase__ : Dict = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        UpperCAmelCase__ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 )

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
            [37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def _a (self , _lowerCamelCase , _lowerCamelCase ):
        """Decoding a 64x64 latent must yield a 512x512 image with the recorded slice."""
        UpperCAmelCase__ : int = self.get_sd_vae_model()
        UpperCAmelCase__ : Any = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            UpperCAmelCase__ : Dict = model.decode(_lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        UpperCAmelCase__ : Dict = sample[-1, -2:, :2, -2:].flatten().cpu()
        UpperCAmelCase__ : List[str] = torch.tensor(_lowerCamelCase )
        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
            [16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def _a (self , _lowerCamelCase , _lowerCamelCase ):
        """fp16 decode must match the recorded slice (looser tolerance)."""
        UpperCAmelCase__ : Tuple = self.get_sd_vae_model(fpaa=_lowerCamelCase )
        UpperCAmelCase__ : Any = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCamelCase )
        with torch.no_grad():
            UpperCAmelCase__ : Optional[int] = model.decode(_lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        UpperCAmelCase__ : List[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        UpperCAmelCase__ : Any = torch.tensor(_lowerCamelCase )
        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=5e-3 )

    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
    def _a (self , _lowerCamelCase ):
        """xformers attention must agree with default attention (fp16 decode)."""
        UpperCAmelCase__ : Union[str, Any] = self.get_sd_vae_model(fpaa=_lowerCamelCase )
        UpperCAmelCase__ : Optional[Any] = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCamelCase )
        with torch.no_grad():
            UpperCAmelCase__ : str = model.decode(_lowerCamelCase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            UpperCAmelCase__ : Optional[int] = model.decode(_lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-1 )

    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
    def _a (self , _lowerCamelCase ):
        """xformers attention must agree with default attention (fp32 decode)."""
        UpperCAmelCase__ : int = self.get_sd_vae_model()
        UpperCAmelCase__ : List[str] = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            UpperCAmelCase__ : Optional[int] = model.decode(_lowerCamelCase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            UpperCAmelCase__ : Optional[Any] = model.decode(_lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
            [47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
            # fmt: on
        ] )
    def _a (self , _lowerCamelCase , _lowerCamelCase ):
        """Encoding must produce an 8x-downscaled latent matching the recorded slice."""
        UpperCAmelCase__ : List[str] = self.get_sd_vae_model()
        UpperCAmelCase__ : Optional[int] = self.get_sd_image(_lowerCamelCase )
        UpperCAmelCase__ : Optional[int] = self.get_generator(_lowerCamelCase )
        with torch.no_grad():
            UpperCAmelCase__ : int = model.encode(_lowerCamelCase ).latent_dist
            UpperCAmelCase__ : Tuple = dist.sample(generator=_lowerCamelCase )
        # Latent spatial dims are the image dims divided by the 8x VAE factor.
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        UpperCAmelCase__ : str = sample[0, -1, -3:, -3:].flatten().cpu()
        UpperCAmelCase__ : Optional[int] = torch.tensor(_lowerCamelCase )
        UpperCAmelCase__ : str = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=_lowerCamelCase )
| 370 |
"""simple docstring"""
import numpy
# List of input, output pairs.  Every function below reads these by their
# original names (train_data, test_data, parameter_vector, m, LEARNING_RATE);
# binding them all to a single throwaway name `_A` left those references
# undefined, so the real names are restored here.
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
# parameter_vector[0] is the bias term; the rest are feature weights.
parameter_vector = [2, 4, 1, 5]
# Number of training examples.
m = len(train_data)
# Step size for gradient descent.
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Return hypothesis minus actual output for one example.

    :param example_no: index of the example whose error is computed
    :param data_set: "train" or "test"
    :return: signed error of the current hypothesis on that example
    """
    # The call sites in this module already use the name `_error`; the
    # obfuscated `a__` definition never matched them.
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )
def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = w0 + w1*x1 + ... + wn*xn.

    :param data_input_tuple: feature tuple (x1, ..., xn)
    :return: hypothesis value using the module-level ``parameter_vector``
    """
    hyp_val = 0
    # One weight per feature; parameter_vector[0] is the bias added below,
    # so the loop runs over len(parameter_vector) - 1 features (the
    # obfuscated version iterated one feature short).
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Return the ground-truth output of one example.

    :param example_no: index of the example
    :param data_set: "train" or "test"
    :return: stored output value, or None for an unknown data_set
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Evaluate the hypothesis on one example of the chosen data set.

    :param example_no: index of the example
    :param data_set: "train" or "test"
    :return: hypothesis value, or None for an unknown data_set
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum the cost-derivative contributions over the first ``end`` examples.

    :param index: feature index; -1 selects the bias term (no feature factor)
    :param end: number of training examples to sum over (defaults to ``m``)
    :return: sum of error (optionally weighted by the feature value)

    The obfuscated definition reused one parameter name twice, which is a
    SyntaxError; the two distinct parameters are restored here.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            # Bias term: plain error, no feature multiplier.
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """Average cost derivative for one parameter over all training examples.

    :param index: feature index; -1 selects the bias term
    :return: mean derivative value
    """
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    """Run batch gradient descent until the parameter vector converges.

    Updates the module-level ``parameter_vector`` in place and prints the
    iteration count.  Convergence is declared when successive parameter
    vectors agree within ``absolute_error_limit``.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            # i - 1 maps parameter 0 to the bias derivative (index -1).
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    """Print actual vs. predicted outputs for every test example."""
    # Iterate over the test set, not an undefined obfuscated name.
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
# Script entry point: fit the parameters, then report test-set predictions.
if __name__ == "__main__":
    run_gradient_descent()
    print("""\nTesting gradient descent for a linear hypothesis function.\n""")
    test_gradient_descent()
| 166 | 0 |
"""simple docstring"""
class PrefixSum:
    """Prefix-sum array supporting O(1) range sums and subarray-sum queries.

    Fixes over the obfuscated original: ``__init__`` bound locals instead of
    ``self.prefix_sum`` (so every method raised AttributeError), and both
    query methods shared one name so the first was shadowed.
    """

    def __init__(self, array) -> None:
        """Precompute prefix sums of *array* in O(n)."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end) -> int:
        """Return the sum of array[start..end] inclusive, in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum) -> bool:
        """Return True if some contiguous subarray sums to *target_sum*."""
        # A subarray sums to target_sum iff two prefix sums differ by it;
        # seed with 0 to cover subarrays starting at index 0.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
# Run any doctests embedded in this module when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 194 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the gamma function via numerical integration.

    Gamma(num) = integral from 0 to infinity of x**(num-1) * e**(-x) dx.

    :param num: strictly positive argument
    :raises ValueError: if ``num`` is not positive
    :return: approximate value of Gamma(num)

    The obfuscated original gave both functions one shared name and passed
    garbage arguments to ``quad``; the intended call is
    ``quad(integrand, 0, inf, args=(num))``.
    """
    if num <= 0:
        raise ValueError("math domain error")
    # quad returns (value, abs_error); only the value is needed.
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    """Integrand of the gamma function: x**(z-1) * e**(-x)."""
    return math.pow(x, z - 1) * math.exp(-x)
# Run embedded doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 263 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase_ = '''\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Round image dims up to the nearest multiple of ``scale_factor**2``/``scale_factor``.

    The latent grid is ``scale_factor**2`` times smaller than the image per
    axis; each latent dimension is rounded up, then multiplied back by
    ``scale_factor`` to get the working resolution.

    :param height: requested image height in pixels
    :param width: requested image width in pixels
    :param scale_factor: movq downscale factor (default 8)
    :return: (new_height, new_width) tuple

    Restored from the obfuscated ``snake_case`` definition, whose duplicated
    parameter names were a SyntaxError; the call site in the pipeline already
    uses this name.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class UpperCamelCase_ (snake_case_ ):
    """Kandinsky 2.2 text-to-image decoder pipeline (unet + scheduler + movq).

    NOTE(review): mechanically obfuscated.  Several ``def`` lines repeat the
    same parameter name (a SyntaxError), helper methods share the single name
    ``_SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones), and bodies
    read original variable names (``latents``, ``image_embeds`` ...) that are
    only ever assigned to ``UpperCAmelCase_``.  Comments describe intent.
    """

    def __init__( self : Optional[int] , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : DDPMScheduler , lowerCAmelCase_ : VQModel , ) -> str:
        """Register the denoising unet, scheduler and movq VAE submodules."""
        super().__init__()
        self.register_modules(
            unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , movq=lowerCAmelCase_ , )
        # Per-axis image/latent scale factor derived from the movq config.
        UpperCAmelCase_ : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] ) -> Tuple:
        """Create (or validate) the initial latents and scale by init_noise_sigma."""
        if latents is None:
            UpperCAmelCase_ : Optional[Any] = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            UpperCAmelCase_ : int = latents.to(lowerCAmelCase_ )
        UpperCAmelCase_ : Optional[Any] = latents * scheduler.init_noise_sigma
        return latents

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str]=0 ) -> Dict:
        """Offload submodules to CPU one-by-one to reduce GPU memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        UpperCAmelCase_ : str = torch.device(f"""cuda:{gpu_id}""" )
        UpperCAmelCase_ : Dict = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(lowerCAmelCase_ , lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Any=0 ) -> Tuple:
        """Model-level CPU offload via accelerate hooks (needs accelerate>=0.17)."""
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        UpperCAmelCase_ : Optional[int] = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=lowerCAmelCase_ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        UpperCAmelCase_ : List[Any] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            UpperCAmelCase_ : Optional[int] = cpu_offload_with_hook(lowerCAmelCase_ , lowerCAmelCase_ , prev_module_hook=lowerCAmelCase_ )
        # We'll offload the last model manually.
        UpperCAmelCase_ : Tuple = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        """Device the unet actually executes on (accounts for accelerate hooks)."""
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowerCAmelCase_ , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(lowerCAmelCase_ )
    def __call__( self : int , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : int = 512 , lowerCAmelCase_ : int = 512 , lowerCAmelCase_ : int = 100 , lowerCAmelCase_ : float = 4.0 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , ) -> Dict:
        """Denoising loop: image embeds -> latents -> movq decode -> images."""
        UpperCAmelCase_ : Optional[Any] = self._execution_device
        # Classifier-free guidance is active for guidance_scale > 1.
        UpperCAmelCase_ : str = guidance_scale > 1.0
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
            UpperCAmelCase_ : Tuple = torch.cat(lowerCAmelCase_ , dim=0 )
        UpperCAmelCase_ : str = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
            UpperCAmelCase_ : Any = torch.cat(lowerCAmelCase_ , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate embeds per requested image and stack [negative, positive].
            UpperCAmelCase_ : int = image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
            UpperCAmelCase_ : str = negative_image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
            UpperCAmelCase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCAmelCase_ )
        self.scheduler.set_timesteps(lowerCAmelCase_ , device=lowerCAmelCase_ )
        UpperCAmelCase_ : int = self.scheduler.timesteps
        UpperCAmelCase_ : Union[str, Any] = self.unet.config.in_channels
        UpperCAmelCase_ : Union[str, Any] = downscale_height_and_width(lowerCAmelCase_ , lowerCAmelCase_ , self.movq_scale_factor )
        # create initial latent
        UpperCAmelCase_ : Tuple = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.scheduler , )
        for i, t in enumerate(self.progress_bar(lowerCAmelCase_ ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCAmelCase_ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCAmelCase_ : int = {'image_embeds': image_embeds}
            UpperCAmelCase_ : Dict = self.unet(
                sample=lowerCAmelCase_ , timestep=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , added_cond_kwargs=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
            if do_classifier_free_guidance:
                # Split predicted noise/variance, blend uncond and text branches.
                UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
                UpperCAmelCase_ : List[Any] = noise_pred.chunk(2 )
                UpperCAmelCase_ : Union[str, Any] = variance_pred.chunk(2 )
                UpperCAmelCase_ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                UpperCAmelCase_ : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                UpperCAmelCase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase_ : Dict = self.scheduler.step(
                lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ , )[0]
        # post-processing
        UpperCAmelCase_ : int = self.movq.decode(lowerCAmelCase_ , force_not_quantize=lowerCAmelCase_ )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] to [0, 1] before converting to numpy/PIL.
            UpperCAmelCase_ : Tuple = image * 0.5 + 0.5
            UpperCAmelCase_ : Union[str, Any] = image.clamp(0 , 1 )
            UpperCAmelCase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            UpperCAmelCase_ : List[Any] = self.numpy_to_pil(lowerCAmelCase_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowerCAmelCase_ )
| 356 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A simple mathematical vector over floats.

    Restored from the obfuscated original: the class is referenced as
    ``Vector`` throughout the module, and the five trailing methods all
    shared the name ``_SCREAMING_SNAKE_CASE`` (so only the last survived);
    they are given back their intended names (``copy``, ``component``,
    ``change_component``, ``euclidean_length``, ``angle``).
    """

    def __init__(self, components: Collection[float] | None = None) -> None:
        """Store the component list; defaults to the empty vector."""
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        """Number of components."""
        return len(self.__components)

    def __str__(self) -> str:
        """Render as "(c1,c2,...,cn)"."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        """Component-wise sum; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        """Component-wise difference; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        """Scalar multiplication (returns a Vector) or dot product (returns float)."""
        if isinstance(other, (float, int)):
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return a copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return the i-th component (negative indices allowed)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set the component at *pos* to *value*."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the Euclidean (L2) norm; the vector must be non-empty."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Angle between this vector and *other*, in radians (or degrees)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension):
    """Return the zero vector of the given dimension.

    Restored name: ``Matrix.__mul__`` already calls ``zero_vector``.
    """
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension, pos):
    """Return the unit basis vector e_pos in the given dimension.

    :param dimension: length of the vector
    :param pos: index that receives the single 1 entry
    """
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar, x, y):
    """Compute the BLAS-style update scalar * x + y.

    :param scalar: int or float multiplier
    :param x: Vector to scale
    :param y: Vector to add
    """
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n, a, b):
    """Return a vector of *n* random integers drawn from [a, b].

    NOTE(review): the obfuscated version seeded ``random`` with its first
    (duplicated) parameter; seeding with ``None`` (system entropy) matches
    the upstream source -- confirm if reproducibility was intended.
    """
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A simple w x h matrix of floats.

    Restored from the obfuscated original: the class is referenced as
    ``Matrix`` throughout the module, and the accessor methods all shared
    one name; they are given back their intended names (``height``,
    ``width``, ``component``, ``change_component``, ``minor``,
    ``cofactor``, ``determinant``).
    """

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        """Store the row-major value table and its width/height."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        """Render rows as "|a,b,...|" lines."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        """Component-wise sum; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """Component-wise difference; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        """Matrix-vector product or scalar multiplication."""
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        """Number of rows."""
        return self.__height

    def width(self) -> int:
        """Number of columns."""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return the value at row *x*, column *y*."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set the value at row *x*, column *y*."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Determinant of the submatrix with row *x* and column *y* removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Signed minor: (-1)**(x+y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n):
    """Return the n x n matrix of all zeros."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width, height, a, b):
    """Return a height x width matrix of random integers drawn from [a, b].

    NOTE(review): seeding with ``None`` (system entropy) matches the
    upstream source; the obfuscated version seeded with a duplicated
    parameter, which cannot have been intended.
    """
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
| 253 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.