code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class _lowercase(tf.keras.layers.Layer):
    """Adaptive log-softmax over a clustered vocabulary (Transformer-XL style).

    The vocabulary is split at ``cutoffs`` into a frequent "head" (shortlist)
    plus tail clusters; rare tokens are reached through a per-cluster token in
    the head distribution. Restored from an obfuscated dump in which every
    method was named ``lowerCamelCase_`` and locals referenced the undefined
    token ``_snake_case``.
    """

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        """
        Args:
            vocab_size: total number of tokens.
            d_embed: embedding dimension.
            d_proj: dimension of the hidden states fed to this layer.
            cutoffs: ascending vocabulary cutoffs delimiting the clusters.
            div_val: divisor shrinking the embedding size of each tail cluster.
            keep_order: stored for API parity with the PyTorch version; not read here.
        """
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        # Cluster parameters only exist when there is at least one tail cluster.
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            # Single shared embedding size: one full (vocab_size, d_embed) matrix per cutoff.
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            # Shrinking embedding sizes: each cluster owns a slice-sized matrix.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        """Optionally project ``x`` then compute logits ``x @ W^T + b``."""
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        """Pick ``logprob[j, target[j]]`` for every row ``j``."""
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            # Plain softmax: a single output matrix covers the whole vocabulary.
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            # assumes hidden is (len, bsz, d_proj) and target is (len, bsz) — TODO confirm
            hidden_sizes = shape_list(target)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    # Head: shortlist tokens plus one "cluster token" per tail cluster.
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    # Tail cluster: total logprob = cluster token logprob + in-cluster logprob.
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    # Scatter the per-token NLL back into the (len, bsz) loss tensor.
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
| 708 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase(AbstractArchiveFileSystem):
    """Expose one compressed file as a filesystem containing a single file.

    Restored from an obfuscated dump: class attributes were all named ``a``
    (fsspec requires ``protocol``/``compression``/``extension``), every method
    shared the name ``lowerCamelCase_``, and the ``__init__`` assignments were
    discarded into throwaway names so ``self.file`` was never set.
    """

    root_marker = ""
    # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    protocol: str = ""
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """
        Args:
            fo: path/URL of the compressed file to expose.
            target_protocol: protocol used to open ``fo`` itself.
            target_options: extra options forwarded to the underlying filesystem.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # Name of the single exposed file: the compressed name minus its extension.
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # Lazily build the one-entry listing cache for the single inner file.
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        # The archive holds exactly one file, so any path resolves to its full contents.
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(_lowercase):
    """Read contents of a BZ2-compressed file as a filesystem with one file inside."""

    # fsspec dispatches on these class attributes; the obfuscated dump had
    # all three named ``a``, which fsspec cannot use.
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class GzipFileSystem(_lowercase):
    """Read contents of a gzip-compressed file as a filesystem with one file inside."""

    # fsspec dispatches on these class attributes; the obfuscated dump had
    # all three named ``a``, which fsspec cannot use.
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class Lz4FileSystem(_lowercase):
    """Read contents of an LZ4-compressed file as a filesystem with one file inside."""

    # fsspec dispatches on these class attributes; the obfuscated dump had
    # all three named ``a``, which fsspec cannot use.
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class XzFileSystem(_lowercase):
    """Read contents of an xz-compressed file as a filesystem with one file inside."""

    # fsspec dispatches on these class attributes; the obfuscated dump had
    # all three named ``a``, which fsspec cannot use.
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(_lowercase):
    """Read contents of a Zstandard-compressed file as a filesystem with one file inside.

    Restored from an obfuscated dump: duplicate parameter names made the
    ``__init__`` a SyntaxError, and the inner proxy class / ``_enter`` binding
    referenced names that were never defined.
    """

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            # Delegating proxy: its context-manager protocol returns the proxy
            # itself (whose ``close`` is writable) instead of the raw reader.
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 631 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds tiny UperNet configs and random inputs for the unit tests below.

    Restored from an obfuscated dump: the class name was ``_lowercase`` while
    L294-equivalent call sites reference ``UperNetModelTester``, and every
    method shared the name ``lowerCamelCase_`` although ``self.get_config()``
    and ``self.get_backbone_config()`` were called by their real names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model-test harness for UperNet (semantic segmentation only).

    Restored from an obfuscated dump: the mixin bases were the undefined token
    ``_lowerCAmelCase`` (both mixins are imported at the top of the file), the
    six flag attributes were all named ``a``, and every test method shared the
    name ``lowerCamelCase_`` so only the last one survived.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        # The backbone config is nested, so zero-init it separately.
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Download and return the ADE20k fixture image used by the integration tests.

    Restored name: the obfuscated def was ``SCREAMING_SNAKE_CASE_`` while the
    integration tests below call ``prepare_img()``; the downloaded path was
    also discarded and ``Image.open`` received an undefined name.
    """
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Slow integration tests comparing pretrained UperNet logits to reference values.

    Restored from an obfuscated dump in which both test methods shared one
    name and device/model references used the undefined ``_lowerCAmelCase``.
    """

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 709 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : int =logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    """Load a MobileViTV2 YAML config file and return it as a flat Namespace.

    Nested YAML mappings are flattened into dotted keys, e.g.
    ``model.classification.name``. Restored name: the obfuscated def was
    ``SCREAMING_SNAKE_CASE_`` while the call site uses ``load_orig_config_file``.

    Args:
        orig_cfg_file: path to the original yaml configuration file.

    Returns:
        argparse.Namespace with one attribute per flattened dotted key.
    """
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into dotted keys.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a MobileViTV2 HF config for the given checkpoint task name.

    Args:
        task_name: checkpoint task identifier, e.g. "imagenet1k_256" or "ade20k_deeplabv3".
        orig_cfg_file: path to the original yaml config of the checkpoint.

    Returns:
        A populated ``MobileViTVaConfig``.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset: pick label count, input resolution and id2label mapping file.
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config: copy over the fields the HF config needs.
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            # NOTE(review): attribute names assumed to match MobileViTVaConfig — confirm.
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` to key ``new`` (in place).

    Restored name: the obfuscated def was ``SCREAMING_SNAKE_CASE_`` while the
    conversion loop calls ``rename_key``; the popped value was also discarded.
    """
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Map original MobileViTV2 state-dict key names to HF key names.

    Args:
        state_dict: original checkpoint state dict (only its keys are read).
        base_model: if True, omit the "mobilevitv2." prefix (bare backbone).

    Returns:
        List of (old_key, new_key) tuples, one per key in ``state_dict``.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        # Conditions test the ORIGINAL key `k`; replacements apply to `k_new`.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")
        for i in [3, 4, 5]:
            # Each stage has a different number of transformer blocks.
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    # The block after the last transformer layer is a LayerNorm.
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")
        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop auxiliary-head keys ("seg_head.aux_head.*") from the state dict, in place.

    The HF model has no auxiliary segmentation head, so these weights are
    discarded. Keys are collected first to avoid mutating while iterating.
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download and return the standard COCO test image (two cats on a couch).

    Restored name: the obfuscated def was ``SCREAMING_SNAKE_CASE_`` while the
    conversion routine calls ``prepare_img()``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
    """Convert an original MobileViTV2 checkpoint to the HF format and save it.

    NOTE(review): obfuscation collapsed locals/params; the body refers to names
    (``task_name``, ``checkpoint``, ``model``, ``config``, ``outputs`` ...)
    that do not match the mangled signature — confirm against the upstream
    conversion script before running.
    """
    lowerCamelCase__ : str = get_mobilevitva_config(UpperCamelCase , UpperCamelCase )
    # load original state_dict
    lowerCamelCase__ : List[str] = torch.load(UpperCamelCase , map_location="""cpu""" )
    # load huggingface model
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation(UpperCamelCase ).eval()
        lowerCamelCase__ : Tuple = False
    else:
        lowerCamelCase__ : int = MobileViTVaForImageClassification(UpperCamelCase ).eval()
        lowerCamelCase__ : Optional[Any] = False
    # remove and rename some keys of load the original model
    lowerCamelCase__ : Tuple = checkpoint
    remove_unused_keys(UpperCamelCase )
    lowerCamelCase__ : Union[str, Any] = create_rename_keys(UpperCamelCase , base_model=UpperCamelCase )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
    # load modified state_dict
    model.load_state_dict(UpperCamelCase )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    lowerCamelCase__ : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    lowerCamelCase__ : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" )
    lowerCamelCase__ : str = model(**UpperCamelCase )
    # verify classification model
    if task_name.startswith("""imagenet""" ):
        lowerCamelCase__ : Dict = outputs.logits
        lowerCamelCase__ : Optional[Any] = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
    if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
        # expected_logits for base variant
        lowerCamelCase__ : Optional[Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
        assert torch.allclose(logits[0, :3] , UpperCamelCase , atol=1E-4 )
    Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(UpperCamelCase )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point: parse the task name plus checkpoint/config/output paths,
    # then run the conversion.
    # NOTE(review): obfuscation renamed the parser/args variables to ``_A``
    # while later lines still reference ``parser``/``args`` — confirm upstream.
    _A : Optional[int] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    _A : Dict =parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 631 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the BLIP model package.
# NOTE(review): reconstructed to the canonical lazy-module pattern — the
# original assigned each optional-backend symbol list to a throwaway variable
# and then passed an undefined ``_import_structure`` to ``_LazyModule``
# (NameError at import time), and bound the lazy module to a variable instead
# of installing it in ``sys.modules``.
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    # Install a lazy proxy so heavy backends are only imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowercase ( _lowercase ):
    def lowerCamelCase_ ( self: Any ):
        """Build the config from the tester's inputs and check it exposes ``width_multiplier``."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class _lowercase :
    """Helper that builds MobileViTV2 configs and dummy inputs for the model tests.

    NOTE(review): obfuscation collapsed ``__init__``'s parameter names to one
    repeated name and the attribute assignments to a throwaway local; the
    intended attributes (``self.batch_size`` etc.) must be confirmed against
    the upstream test file.
    """
    def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ):
        lowerCamelCase__ : Any = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : str = patch_size
        lowerCamelCase__ : Optional[int] = num_channels
        # hidden size scales with the width multiplier, rounded to a multiple of 8
        lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
        lowerCamelCase__ : List[str] = hidden_act
        lowerCamelCase__ : Any = conv_kernel_size
        lowerCamelCase__ : Any = output_stride
        lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
        lowerCamelCase__ : List[str] = use_labels
        lowerCamelCase__ : Optional[Any] = is_training
        lowerCamelCase__ : List[str] = num_labels
        lowerCamelCase__ : Dict = initializer_range
        lowerCamelCase__ : List[Any] = scope
        lowerCamelCase__ : Tuple = width_multiplier
        lowerCamelCase__ : List[Any] = ffn_dropout
        lowerCamelCase__ : Any = attn_dropout
    def lowerCamelCase_ ( self: Dict ):
        """Build random pixel values (and labels when ``use_labels``) plus a config."""
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple = None
        lowerCamelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
            lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        lowerCamelCase__ : Union[str, Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def lowerCamelCase_ ( self: List[Any] ):
        """Build a MobileViTVaConfig mirroring the tester's settings."""
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
        """Run the bare model and check the last hidden state shape."""
        lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : str = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
        """Run the classification head and check the logits shape."""
        lowerCamelCase__ : Tuple = self.num_labels
        lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
        """Run the segmentation head with and without labels and check logits shape."""
        lowerCamelCase__ : List[str] = self.num_labels
        lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def lowerCamelCase_ ( self: Tuple ):
        """Split prepared inputs into (config, inputs_dict) for the common tests."""
        lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs
        lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common ModelTester/Pipeline test-suite for the MobileViTV2 family."""
    # all model classes under test (empty tuple when torch is unavailable)
    a = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # pipeline-task -> model-class mapping for the pipeline mixin
    a = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): obfuscation collapsed the four boolean test flags
    # (attention/hidden-state/pruning/head-masking upstream) to ``a`` — confirm.
    a = False
    a = False
    a = False
    a = False
    def lowerCamelCase_ ( self: Optional[int] ):
        # instantiate the model tester and config tester used by all tests below
        lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
        lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
    def lowerCamelCase_ ( self: Tuple ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass
    def lowerCamelCase_ ( self: Tuple ):
        # forward() must take `pixel_values` as its first argument
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCamelCase__ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: List[str] ):
        # hidden states: 5 feature maps, spatial dims halved at each stage
        def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs.hidden_states
            lowerCamelCase__ : List[Any] = 5
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCamelCase__ : int = 2
            for i in range(len(UpperCamelCase__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : str = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # smoke-test loading the first released checkpoint from the hub
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
    """Return the COCO cats fixture image used by the integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration tests against released MobileViTV2 checkpoints."""
    @cached_property
    def lowerCamelCase_ ( self: Tuple ):
        # default image processor for the 1.0x ImageNet-1k checkpoint
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
            if is_vision_available()
            else None
        )
    @slow
    def lowerCamelCase_ ( self: Any ):
        # classification head: verify logits shape and the first three values
        lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.default_image_processor
        lowerCamelCase__ : List[Any] = prepare_img()
        lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : int = model(**UpperCamelCase__ )
        # verify the logits
        lowerCamelCase__ : str = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # segmentation head: verify logits shape and a 3x3x3 slice of values
        lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Union[str, Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
        lowerCamelCase__ : str = outputs.logits
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Any = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ] , device=UpperCamelCase__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        # post-processing: semantic maps with and without target size resizing
        lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
        lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
        lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
        lowerCamelCase__ : int = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        lowerCamelCase__ : int = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 631 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _lowercase ( SCREAMING_SNAKE_CASE_ ):
    """Agent tool that reads English text aloud with SpeechT5 plus a HiFi-GAN vocoder.

    NOTE(review): obfuscation collapsed all class attributes to ``a`` and all
    methods to ``lowerCamelCase_`` (setup/encode/forward/decode upstream).
    """
    a = """microsoft/speecht5_tts"""
    a = (
        """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
        """text to read (in English) and returns a waveform object containing the sound."""
    )
    a = """text_reader"""
    a = SpeechTaProcessor
    a = SpeechTaForTextToSpeech
    a = SpeechTaHifiGan
    a = ["""text"""]
    a = ["""audio"""]
    def lowerCamelCase_ ( self: int ):
        # default the vocoder checkpoint before running the base setup
        if self.post_processor is None:
            lowerCamelCase__ : int = '''microsoft/speecht5_hifigan'''
        super().setup()
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: str=None ):
        # tokenize the text; fall back to a CMU-Arctic x-vector when no speaker
        # embedding is supplied (requires the `datasets` package)
        lowerCamelCase__ : List[Any] = self.pre_processor(text=UpperCamelCase__ , return_tensors="""pt""" , truncation=UpperCamelCase__ )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            lowerCamelCase__ : List[Any] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
            lowerCamelCase__ : Dict = torch.tensor(embeddings_dataset[7_305]["""xvector"""] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[Any] ):
        # synthesize the mel/waveform features without tracking gradients
        with torch.no_grad():
            return self.model.generate_speech(**UpperCamelCase__ )
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Optional[int] ):
        # vocode to a CPU waveform tensor
        with torch.no_grad():
            return self.post_processor(UpperCamelCase__ ).cpu().detach()
| 711 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger.
# NOTE(review): obfuscation collapsed four distinct module constants
# (upstream: logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) to the single name ``_A``.
_A : Optional[Any] =logging.get_logger(__name__)
# Standard file names for the byte-level BPE tokenizer assets.
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Map from checkpoint name to its hosted tokenizer.json.
_A : Tuple ={
    '''tokenizer_file''': {
        '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
    },
}
# Maximum model input sizes (positional-embedding limit) per checkpoint.
_A : List[Any] ={
    '''gpt-neox-20b''': 2_048,
}
class _lowercase ( _lowercase ):
    """Fast (Rust-backed) GPT-NeoX-20B tokenizer.

    NOTE(review): the class-level constants below reference names that the
    obfuscated module only assigns to ``_A`` — confirm against upstream.
    """
    a = VOCAB_FILES_NAMES
    a = PRETRAINED_VOCAB_FILES_MAP
    a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a = ["""input_ids""", """attention_mask"""]
    def __init__( self: Optional[int] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: int=None , UpperCamelCase__: Tuple=None , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Union[str, Any]="<|endoftext|>" , UpperCamelCase__: Tuple=False , **UpperCamelCase__: str , ):
        super().__init__(
            UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # does not match the requested one.
        lowerCamelCase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
            lowerCamelCase__ : Any = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) )
            lowerCamelCase__ : Dict = add_prefix_space
            lowerCamelCase__ : Optional[int] = pre_tok_class(**UpperCamelCase__ )
        lowerCamelCase__ : Dict = add_prefix_space
    def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ):
        # Save the backend tokenizer's vocabulary/merges files and return their paths.
        lowerCamelCase__ : Optional[Any] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
        return tuple(UpperCamelCase__ )
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: "Conversation" ):
        # Flatten a Conversation into input ids (eos appended per turn),
        # truncating to the last model_max_length tokens.
        lowerCamelCase__ : str = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
        if len(UpperCamelCase__ ) > self.model_max_length:
            lowerCamelCase__ : int = input_ids[-self.model_max_length :]
        return input_ids
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def SCREAMING_SNAKE_CASE_ (number) -> bool:
    """Return True when ``number`` (a non-negative int) is a perfect square.

    Fixes: the original body referenced undefined names (``number``/``sq``
    did not match the mangled parameter/locals), and ``int(number ** 0.5)``
    is inexact for large integers — ``math.isqrt`` stays exact.
    """
    root = isqrt(number)
    return number == root * root
def SCREAMING_SNAKE_CASE_ (x_num, x_den, y_num, y_den, z_num, z_den) -> Tuple:
    """Return (num, den) of x + y + z in lowest terms, each addend given as num_i/den_i.

    Fixes: the original signature repeated one mangled parameter name six
    times (a SyntaxError) while the body used ``x_num``..``z_den`` —
    restored a coherent positional signature.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 35 ) -> Optional[Any]:
    """Enumerate reduced fractions z obtainable from pairs x, y (for n in {1, 2, -1, 2}),
    sum the unique ones, and return numerator + denominator of the total.

    NOTE(review): obfuscation broke internal references — the body calls
    ``add_three``/``is_sq`` and uses ``order``/``total``, which do not match
    the mangled definitions/parameters in this file; confirm upstream.
    """
    lowerCamelCase__ : List[Any] = set()
    lowerCamelCase__ : Tuple = 42
    lowerCamelCase__ : List[str] = Fraction(0 )
    lowerCamelCase__ : Optional[Any] = 42
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    lowerCamelCase__ : Dict = x_num * y_den + x_den * y_num
                    lowerCamelCase__ : Tuple = x_den * y_den
                    lowerCamelCase__ : str = gcd(lowerCamelCase_ , lowerCamelCase_ )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        lowerCamelCase__ : Optional[int] = add_three(
                            lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                        unique_s.add(lowerCamelCase_ )
                    # n=2
                    lowerCamelCase__ : Tuple = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    lowerCamelCase__ : Any = x_den * x_den * y_den * y_den
                    if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
                        lowerCamelCase__ : Tuple = int(sqrt(lowerCamelCase_ ) )
                        lowerCamelCase__ : Optional[int] = int(sqrt(lowerCamelCase_ ) )
                        lowerCamelCase__ : Any = gcd(lowerCamelCase_ , lowerCamelCase_ )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            lowerCamelCase__ : Any = add_three(
                                lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                            unique_s.add(lowerCamelCase_ )
                    # n=-1
                    lowerCamelCase__ : int = x_num * y_num
                    lowerCamelCase__ : Any = x_den * y_num + x_num * y_den
                    lowerCamelCase__ : Any = gcd(lowerCamelCase_ , lowerCamelCase_ )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        lowerCamelCase__ : List[str] = add_three(
                            lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                        unique_s.add(lowerCamelCase_ )
                    # n=2
                    lowerCamelCase__ : Optional[int] = x_num * x_num * y_num * y_num
                    lowerCamelCase__ : int = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
                        lowerCamelCase__ : List[str] = int(sqrt(lowerCamelCase_ ) )
                        lowerCamelCase__ : Optional[Any] = int(sqrt(lowerCamelCase_ ) )
                        lowerCamelCase__ : Tuple = gcd(lowerCamelCase_ , lowerCamelCase_ )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            lowerCamelCase__ : int = add_three(
                                lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                            unique_s.add(lowerCamelCase_ )
    for num, den in unique_s:
        total += Fraction(lowerCamelCase_ , lowerCamelCase_ )
    return total.denominator + total.numerator
if __name__ == "__main__":
    print(F'{solution() = }')
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the Git (GenerativeImage2Text) model package.
# NOTE(review): reconstructed to the canonical lazy-module pattern — the
# original assigned the structure and the torch-only symbol list to throwaway
# variables and then passed an undefined ``_import_structure`` to
# ``_LazyModule`` (NameError at import time), binding the lazy module to a
# variable instead of installing it in ``sys.modules``.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    # Install a lazy proxy so torch is only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 | 0 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_A : Optional[int] =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ () -> Optional[Any]:
    """Return True when SageMaker model parallelism is configured and usable.

    Checks, in order:
      1. ``SM_HP_MP_PARAMETERS`` is valid JSON containing a ``partitions``
         field (required for model parallel).
      2. ``SM_FRAMEWORK_PARAMS`` is valid JSON with ``sagemaker_mpi_enabled``
         set to a truthy value.
      3. The ``smdistributed`` module is importable.

    Fixes: the original passed an undefined name (``__snake_case``) to
    ``json.loads`` and as the ``.get`` default — a NameError that the
    ``except json.JSONDecodeError`` clauses would not catch.
    """
    smp_options = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_dict = json.loads(smp_options)
        if "partitions" not in smp_dict:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_dict = json.loads(mpi_options)
        if not mpi_dict.get("""sagemaker_mpi_enabled""" , False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("""smdistributed""" ) is not None
# When SageMaker model parallelism is configured, initialize the runtime once
# at import time so everything below can use ``smp``.
# NOTE(review): obfuscation renamed the availability check above to
# ``SCREAMING_SNAKE_CASE_`` while this call site keeps the original name.
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()
@dataclass
class _lowercase ( _A ):
    """Deprecated SageMaker-specific TrainingArguments.

    Adds the ``mp_parameters`` passthrough plus SageMaker-aware device setup;
    superseded by plain ``TrainingArguments``.
    NOTE(review): obfuscation collapsed the field name and all members to
    ``a``/``lowerCamelCase_`` — confirm names against the upstream file.
    """
    # mp_parameters upstream: opaque string forwarded by the SageMaker launcher
    a = field(
        default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
    def lowerCamelCase_ ( self: int ):
        """Post-init hook: run the base post-init, then warn about deprecation."""
        super().__post_init__()
        warnings.warn(
            """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
            """`TrainingArguments` instead.""" , UpperCamelCase__ , )
    @cached_property
    def lowerCamelCase_ ( self: List[str] ):
        """Pick and initialize the torch device (CPU / SMP / SMDDP / DDP / single GPU)."""
        logger.info("""PyTorch: setting up devices""" )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                """torch.distributed process group is initialized, but local_rank == -1. """
                """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
        if self.no_cuda:
            lowerCamelCase__ : int = torch.device("""cpu""" )
            lowerCamelCase__ : Any = 0
        elif is_sagemaker_model_parallel_available():
            lowerCamelCase__ : str = smp.local_rank()
            lowerCamelCase__ : Dict = torch.device("""cuda""" , UpperCamelCase__ )
            lowerCamelCase__ : str = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
            lowerCamelCase__ : str = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
            lowerCamelCase__ : Dict = torch.device("""cuda""" , self.local_rank )
            lowerCamelCase__ : Tuple = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            lowerCamelCase__ : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            lowerCamelCase__ : Tuple = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
            lowerCamelCase__ : Optional[int] = torch.device("""cuda""" , self.local_rank )
            lowerCamelCase__ : Optional[Any] = 1
        if device.type == "cuda":
            torch.cuda.set_device(UpperCamelCase__ )
        return device
    @property
    def lowerCamelCase_ ( self: Dict ):
        # world size: under model parallel, the data-parallel group size;
        # otherwise defer to the base implementation
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # presumably `place_model_on_device` upstream: skip manual placement
        # when model parallel handles it — TODO confirm
        return not is_sagemaker_model_parallel_available()
    @property
    def lowerCamelCase_ ( self: Optional[int] ):
        # always False here (gradient-accumulation sync behavior upstream)
        return False
| 713 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Test fixtures for the Marian tokenizer tests below.
# NOTE(review): all of these constants rebind the same module-level name
# ``_A`` — later assignments shadow earlier ones; presumably they had
# distinct names (SAMPLE_SP, mock_tokenizer_config, zh_code, ORG_NAME,
# FRAMEWORK) before an automated rename. ``ORG_NAME`` is referenced in the
# test class below. TODO confirm.
# Path to a tiny SentencePiece model used as a shared fixture.
_A : int =get_tests_dir('''fixtures/test_sentencepiece.model''')
# Minimal Marian tokenizer config: translate English -> Finnish.
_A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''}
# Marian language-code control token.
_A : int ='''>>zh<<'''
# Hub organization prefix for pretrained Marian checkpoints.
_A : Dict ='''Helsinki-NLP/'''
# Pick the tensor framework for `return_tensors` based on availability.
if is_torch_available():
    _A : List[Any] ='''pt'''
elif is_tf_available():
    _A : Optional[int] ='''tf'''
else:
    _A : Dict ='''jax'''
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
    """Unit tests for ``MarianTokenizer``.

    NOTE(review): the class lists itself (``_lowercase``) as a base class and
    most helper arguments use the undefined-in-scope name
    ``UpperCamelCase__`` — presumably ``TokenizerTesterMixin`` and the
    original local names before an automated rename. TODO confirm.
    NOTE(review): the three ``a = ...`` attributes rebind one name; only the
    last assignment survives (presumably ``tokenizer_class``,
    ``test_rust_tokenizer`` and ``test_sentencepiece`` originally).
    """

    a = MarianTokenizer
    a = False
    a = True

    def lowerCamelCase_ ( self: List[str] ):
        # setUp: write a tiny vocab, tokenizer config, and SentencePiece
        # models into tmpdirname so from_pretrained() can load a toy tokenizer.
        super().setUp()
        lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
        # Tokenizer factory used by the tester mixin.
        return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
        # Input/output text pair used by the mixin's round-trip tests.
        return (
            "This is a test",
            "This is a test",
        )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # "</s>" must map to id 0 and back.
        lowerCamelCase__ : Any = """</s>"""
        lowerCamelCase__ : List[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Vocab ordering and size of the toy vocabulary written in setUp.
        lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(UpperCamelCase__ ) , 9 )

    def lowerCamelCase_ ( self: int ):
        # vocab_size matches the 9-token toy vocab.
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )

    def lowerCamelCase_ ( self: int ):
        # Round-trip a real pretrained en-de checkpoint through
        # save_pretrained and reload (relies on the ORG_NAME constant above).
        lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
        lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
        lowerCamelCase__ : List[str] = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
        self.assertIn("""source.spm""" , UpperCamelCase__ )
        MarianTokenizer.from_pretrained(UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        # Over-long input must be truncated/padded to the model max (512).
        lowerCamelCase__ : List[Any] = self.get_tokenizer()
        lowerCamelCase__ : Any = tok(
            ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch.input_ids.shape , (2, 512) )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Padding without truncation pads only up to the longest sequence.
        lowerCamelCase__ : str = self.get_tokenizer()
        lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # Integration test: pinned expected encoding for a fixed revision of
        # Helsinki-NLP/opus-mt-en-de (58_100 is the pad id).
        # fmt: off
        lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
        58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )

    def lowerCamelCase_ ( self: List[str] ):
        # Separate source/target vocabs: `text` and `text_target` must be
        # encoded with different vocabularies and decode back correctly.
        lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        lowerCamelCase__ : str = """Tämä on testi"""
        lowerCamelCase__ : Any = """This is a test"""
        lowerCamelCase__ : int = [76, 7, 2_047, 2]
        lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
        lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , __lowercase , )
class _lowercase ( __lowercase ):
    '''simple docstring'''

    # DeeRoBERTa backbone: a DeeBERT-style early-exit model whose embeddings
    # are replaced with RoBERTa embeddings.
    # NOTE(review): ``__lowercase`` (docstring argument and base class) and
    # ``__A`` are not defined anywhere in this file — presumably
    # ROBERTA_START_DOCSTRING, DeeBertModel, and the local ``config``
    # parameter before an automated rename. TODO confirm.
    # NOTE(review): the two ``a = ...`` attributes rebind one name; only the
    # second survives (presumably ``config_class`` and ``base_model_prefix``).
    a = RobertaConfig
    a = 'roberta'

    def __init__( self: Any , UpperCamelCase__: List[Any] ):
        # Initialize the parent model, swap in RoBERTa embeddings, and apply
        # the standard weight initialization.
        super().__init__(__A )
        lowerCamelCase__ : Tuple = RobertaEmbeddings(__A )
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. """ , __lowercase , )
class _lowercase ( __lowercase ):
    '''simple docstring'''

    # Sequence-classification head on top of DeeRoBERTa, including per-layer
    # "highway" early-exit losses.
    # NOTE(review): ``__lowercase`` and ``__A`` are undefined in this file,
    # and the ``a = ...`` attributes rebind one name — presumably
    # ROBERTA_START_DOCSTRING / BertPreTrainedModel / the original local
    # names before an automated rename. TODO confirm.
    a = RobertaConfig
    a = 'roberta'

    def __init__( self: List[Any] , UpperCamelCase__: Union[str, Any] ):
        # Build the early-exit RoBERTa backbone plus dropout and a linear
        # classification layer.
        super().__init__(__A )
        lowerCamelCase__ : int = config.num_labels
        lowerCamelCase__ : Optional[Any] = config.num_hidden_layers
        lowerCamelCase__ : Any = DeeRobertaModel(__A )
        lowerCamelCase__ : Dict = nn.Dropout(config.hidden_dropout_prob )
        lowerCamelCase__ : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )

    @add_start_docstrings_to_model_forward(__A )
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict=None , UpperCamelCase__: List[str]=None , UpperCamelCase__: List[Any]=None , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Optional[Any]=None , UpperCamelCase__: Any=None , UpperCamelCase__: str=None , UpperCamelCase__: Tuple=-1 , UpperCamelCase__: Optional[int]=False , ):
        # Forward pass with highway (early-exit) heads: run the backbone,
        # classify the pooled output, then compute the main loss and one loss
        # per highway exit.
        # NOTE(review): the repeated ``UpperCamelCase__`` parameter names are a
        # SyntaxError as written, and local results are all rebound to
        # ``lowerCamelCase__`` — names referenced later (``outputs``,
        # ``logits``, ``labels``, ``loss_fct`` ...) are presumably what the
        # originals were. TODO confirm against the upstream DeeBERT example.
        lowerCamelCase__ : List[str] = self.num_layers
        try:
            lowerCamelCase__ : str = self.roberta(
                __A , attention_mask=__A , token_type_ids=__A , position_ids=__A , head_mask=__A , inputs_embeds=__A , )
            lowerCamelCase__ : Union[str, Any] = outputs[1]
            lowerCamelCase__ : Optional[int] = self.dropout(__A )
            lowerCamelCase__ : Optional[Any] = self.classifier(__A )
            lowerCamelCase__ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
        except HighwayException as e:
            # An intermediate layer decided to exit early.
            lowerCamelCase__ : Any = e.message
            lowerCamelCase__ : Dict = e.exit_layer
            lowerCamelCase__ : Any = outputs[0]
        if not self.training:
            # Track entropy of the final logits at inference time.
            lowerCamelCase__ : Optional[Any] = entropy(__A )
            lowerCamelCase__ : Optional[Any] = []
            lowerCamelCase__ : Tuple = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                lowerCamelCase__ : Any = MSELoss()
                lowerCamelCase__ : Any = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                lowerCamelCase__ : List[str] = CrossEntropyLoss()
                lowerCamelCase__ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            lowerCamelCase__ : Dict = []
            for highway_exit in outputs[-1]:
                lowerCamelCase__ : Union[str, Any] = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(__A )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    lowerCamelCase__ : Optional[int] = MSELoss()
                    lowerCamelCase__ : Union[str, Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    lowerCamelCase__ : List[str] = CrossEntropyLoss()
                    lowerCamelCase__ : Any = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(__A )
            if train_highway:
                lowerCamelCase__ : List[str] = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                lowerCamelCase__ : List[Any] = (loss,) + outputs
        if not self.training:
            lowerCamelCase__ : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                lowerCamelCase__ : Optional[int] = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                ) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 714 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): both constants rebind the same name ``_A`` (the second
# assignment shadows the first) — presumably ``logger`` and
# ``RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP`` before an automated rename.
_A : Optional[Any] =logging.get_logger(__name__)
# Map of pretrained RWKV checkpoints to their hosted config files.
_A : Optional[int] ={
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( PretrainedConfig ):
    """Configuration for RWKV models.

    Fixes in this block: the original ``__init__`` repeated the parameter
    name ``UpperCamelCase__`` for every argument (a SyntaxError), the class
    inherited from itself (``_lowercase``, a NameError at class-creation
    time — ``PretrainedConfig`` is imported at the top of this file and its
    ``tie_word_embeddings``/token-id kwargs are what ``super().__init__`` is
    called with), and every value was bound to a throwaway local instead of
    being stored on ``self``.
    """

    # NOTE(review): these two attributes rebind one name; presumably
    # ``model_type`` and ``attribute_map`` originally. Kept as-is to avoid
    # changing the visible interface.
    a = """rwkv"""
    a = {"""max_position_embeddings""": """context_length"""}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        """Build the config.

        :param vocab_size: size of the token vocabulary.
        :param context_length: maximum context length (aliased from
            ``max_position_embeddings`` via the attribute map above).
        :param attention_hidden_size: defaults to ``hidden_size`` when None.
        :param intermediate_size: defaults to ``4 * hidden_size`` when None.
        :param rescale_every: layer-rescaling period used at inference.
        :param use_cache: whether the model returns its state for reuse.
        """
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Derived defaults when the caller does not override them.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 631 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowercase ( UpperCamelCase__ ):
    """Processor bundling a Chinese-CLIP image processor and a BERT tokenizer
    into one callable.

    NOTE(review): the base class ``UpperCamelCase__`` and the ``__A`` names
    used in every body below are undefined at module level — presumably
    ``ProcessorMixin`` (imported above) and the original local parameter
    names before an automated rename. TODO confirm.
    NOTE(review): the three ``a = ...`` attributes rebind one name; only the
    last survives (presumably ``attributes``, ``image_processor_class`` and
    ``tokenizer_class`` originally).
    """

    a = ["""image_processor""", """tokenizer"""]
    a = """ChineseCLIPImageProcessor"""
    a = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__( self: List[str] , UpperCamelCase__: int=None , UpperCamelCase__: Optional[Any]=None , **UpperCamelCase__: Optional[Any] ):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, then validate that both components are present.
        lowerCamelCase__ : int = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , __A , )
            lowerCamelCase__ : Optional[Any] = kwargs.pop("""feature_extractor""" )
        lowerCamelCase__ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(__A , __A )
        lowerCamelCase__ : List[str] = self.image_processor

    def __call__( self: List[str] , UpperCamelCase__: Tuple=None , UpperCamelCase__: int=None , UpperCamelCase__: Optional[int]=None , **UpperCamelCase__: List[Any] ):
        # Tokenize text and/or preprocess images; at least one must be given.
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            lowerCamelCase__ : List[Any] = self.tokenizer(__A , return_tensors=__A , **__A )
        if images is not None:
            lowerCamelCase__ : List[str] = self.image_processor(__A , return_tensors=__A , **__A )
        if text is not None and images is not None:
            # NOTE(review): presumably this originally stored
            # ``encoding["pixel_values"] = image_features.pixel_values``; as
            # written the value is bound to a throwaway local. TODO confirm.
            lowerCamelCase__ : Optional[Any] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__A ) , tensor_type=__A )

    def lowerCamelCase_ ( self: List[Any] , *UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: Union[str, Any] ):
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*__A , **__A )

    def lowerCamelCase_ ( self: Any , *UpperCamelCase__: Optional[int] , **UpperCamelCase__: str ):
        # Delegate single-sequence decoding to the tokenizer.
        return self.tokenizer.decode(*__A , **__A )

    @property
    def lowerCamelCase_ ( self: Any ):
        # Union of tokenizer and image-processor input names, deduplicated
        # while preserving order.
        lowerCamelCase__ : str = self.tokenizer.model_input_names
        lowerCamelCase__ : List[str] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def lowerCamelCase_ ( self: Dict ):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , )
        return self.image_processor_class
| 715 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): both constants rebind the same name ``_A`` — presumably
# ``logger`` and ``ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP`` originally.
_A : str =logging.get_logger(__name__)
# Map of pretrained RoCBert checkpoints to their hosted config files.
_A : int ={
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase ( PretrainedConfig ):
    """Configuration for RoCBert models (BERT variant with additional
    pronunciation and shape embeddings for robust Chinese understanding).

    Fixes in this block: the original ``__init__`` repeated the parameter
    name ``UpperCamelCase__`` for every argument (a SyntaxError), the class
    inherited from itself (``_lowercase``, a NameError — ``PretrainedConfig``
    is imported at the top of this file and provides the ``pad_token_id``
    kwarg passed to ``super().__init__``), and every value was bound to a
    throwaway local instead of being stored on ``self``.
    """

    # Model-type identifier (presumably ``model_type`` originally).
    a = """roc_bert"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        """Build the config; parameter names/defaults follow the attribute
        assignments of the original body, in order."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: auxiliary pronunciation/shape embedding channels.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 631 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _lowercase :
    '''simple docstring'''

    # Mixin with shared save/load round-trip tests for feature extractors;
    # concrete test classes are expected to provide
    # `feature_extraction_class` and `feat_extract_dict`.
    # NOTE(review): ``__SCREAMING_SNAKE_CASE`` used below is undefined —
    # presumably the original local names before automated renaming; the
    # intent of each method is noted inline.
    a = None

    def lowerCamelCase_ ( self: Dict ):
        # to_json_string round-trip: every constructor kwarg must survive.
        lowerCamelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
        lowerCamelCase__ : List[Any] = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , __SCREAMING_SNAKE_CASE )

    def lowerCamelCase_ ( self: Tuple ):
        # to_json_file / from_json_file round-trip preserves the config dict.
        lowerCamelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase__ : Optional[Any] = os.path.join(__SCREAMING_SNAKE_CASE , """feat_extract.json""" )
            feat_extract_first.to_json_file(__SCREAMING_SNAKE_CASE )
            lowerCamelCase__ : Optional[Any] = self.feature_extraction_class.from_json_file(__SCREAMING_SNAKE_CASE )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def lowerCamelCase_ ( self: Dict ):
        # save_pretrained / from_pretrained round-trip, including a JSON
        # format sanity check on the saved file.
        lowerCamelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase__ : str = feat_extract_first.save_pretrained(__SCREAMING_SNAKE_CASE )[0]
            check_json_file_has_correct_format(__SCREAMING_SNAKE_CASE )
            lowerCamelCase__ : Any = self.feature_extraction_class.from_pretrained(__SCREAMING_SNAKE_CASE )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def lowerCamelCase_ ( self: str ):
        # Default construction with no kwargs must succeed.
        lowerCamelCase__ : Dict = self.feature_extraction_class()
        self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 716 |
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of the segment joining *pa* and *pb*.

    Fixes: the original definition repeated one parameter name (a
    SyntaxError), averaged a point with itself instead of the two distinct
    endpoints, and was defined under a name that did not match its call
    sites in this file (``get_mid``).
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw a Sierpinski triangle using the global ``my_pen``.

    Draws the outline of the triangle given by the three vertices, then
    (while ``depth`` > 0) recurses into the three corner sub-triangles
    formed with the edge midpoints.

    Fixes: the original definition repeated one parameter name four times
    (a SyntaxError) and was defined under a name that did not match its
    recursive call sites (``triangle``).
    """
    # Move to the first vertex without drawing, then trace the outline.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    # Recurse into each corner triangle: one original vertex plus the
    # midpoints of its two adjacent edges.
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexb, vertexa), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexa), get_mid(vertexc, vertexb), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
_A : Any =turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
_A : Dict =[(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 | 0 |
'''simple docstring'''
import os

# Precomputed set membership target: the first 100 triangular numbers.
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
# Set view for O(1) membership tests in solution().
_TRIANGULAR_SET = frozenset(TRIANGULAR_NUMBERS)


def solution(words_file_path=None):
    """Project Euler 42: count the "triangle words" in a word list.

    A word's value is the sum of its letters' alphabetical positions
    (A=1 ... Z=26); a word is a triangle word when that value is one of the
    first 100 triangular numbers.

    Fixes: the constant was assigned to ``_A`` but read as
    ``TRIANGULAR_NUMBERS``; ``os.path.realpath`` was called on the undefined
    name ``UpperCamelCase``; and the function was defined under a name that
    did not match its call site (``solution()``). The file path is now also
    an optional parameter for testability.

    :param words_file_path: path to a file whose first line holds
        comma-separated, double-quoted, upper-case words; defaults to
        ``words.txt`` next to this script.
    :return: number of triangle words found.
    """
    if words_file_path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        words_file_path = os.path.join(script_dir, "words.txt")
    with open(words_file_path) as f:
        first_line = f.readline()
    words = [word.strip('"') for word in first_line.strip("\r\n").split(",")]
    word_values = [sum(ord(ch) - 64 for ch in word) for word in words]
    return len([value for value in word_values if value in _TRIANGULAR_SET])


if __name__ == "__main__":
    print(solution())
| 717 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowercase :
    """Test helper that builds a small ConvBERT config plus dummy inputs and
    runs each TF ConvBERT head class through a shape-checking forward pass.

    NOTE(review): ``__init__`` and the check methods repeat the parameter
    name ``UpperCamelCase__`` (a SyntaxError as written), every constructor
    default is immediately overridden by the hard-coded values in the body,
    and locals are all rebound to ``lowerCamelCase__`` while later lines
    read the presumably-original names (``input_ids``, ``input_mask``,
    ``model`` ...). TODO confirm against the upstream transformers test.
    """

    def __init__( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Any="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Union[str, Any]=None , ):
        # Hard-coded hyper-parameters for the small test model.
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Union[str, Any] = 13
        lowerCamelCase__ : Any = 7
        lowerCamelCase__ : int = True
        lowerCamelCase__ : Optional[Any] = True
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : List[str] = True
        lowerCamelCase__ : str = 99
        lowerCamelCase__ : Dict = 384
        lowerCamelCase__ : Optional[Any] = 2
        lowerCamelCase__ : Optional[int] = 4
        lowerCamelCase__ : Optional[Any] = 37
        lowerCamelCase__ : Union[str, Any] = """gelu"""
        lowerCamelCase__ : int = 0.1
        lowerCamelCase__ : Optional[Any] = 0.1
        lowerCamelCase__ : List[Any] = 512
        lowerCamelCase__ : Optional[Any] = 16
        lowerCamelCase__ : Any = 2
        lowerCamelCase__ : Optional[Any] = 0.02
        lowerCamelCase__ : int = 3
        lowerCamelCase__ : List[str] = 4
        lowerCamelCase__ : Any = 128
        lowerCamelCase__ : List[Any] = 2
        lowerCamelCase__ : Optional[Any] = 9
        lowerCamelCase__ : Any = 1
        lowerCamelCase__ : Optional[int] = None

    def lowerCamelCase_ ( self: List[Any] ):
        # Build a ConvBertConfig plus random ids/masks/labels tensors.
        lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ : str = None
        if self.use_input_mask:
            lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ : List[str] = None
        if self.use_token_type_ids:
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ : int = None
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ : List[Any] = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: Any ):
        # Base model: output must be (batch, seq_len, hidden_size).
        lowerCamelCase__ : List[Any] = TFConvBertModel(config=UpperCamelCase__ )
        lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        lowerCamelCase__ : List[str] = [input_ids, input_mask]
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ):
        # Masked-LM head: logits over the vocabulary per position.
        lowerCamelCase__ : int = TFConvBertForMaskedLM(config=UpperCamelCase__ )
        lowerCamelCase__ : Tuple = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : int = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
        # Sequence-classification head: one logit vector per example.
        lowerCamelCase__ : int = self.num_labels
        lowerCamelCase__ : Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__ )
        lowerCamelCase__ : Dict = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[str] , UpperCamelCase__: Dict ):
        # Multiple-choice head: inputs are tiled across the choice axis.
        lowerCamelCase__ : Optional[int] = self.num_choices
        lowerCamelCase__ : Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__ )
        lowerCamelCase__ : int = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : List[str] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : Tuple = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: int ):
        # Token-classification head: one label distribution per token.
        lowerCamelCase__ : List[Any] = self.num_labels
        lowerCamelCase__ : List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
        # Question-answering head: per-token start/end span logits.
        lowerCamelCase__ : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__ )
        lowerCamelCase__ : int = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # Repackage prepare_config_and_inputs() output as (config, dict) for
        # the common test mixin.
        # NOTE(review): an annotated assignment with a tuple target
        # (``(...) : str = config_and_inputs``) is a SyntaxError as written —
        # presumably a plain tuple unpacking of seven distinct names before
        # the automated rename. TODO confirm.
        lowerCamelCase__ : str = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : str = config_and_inputs
        lowerCamelCase__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a = False
a = False
a = False
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Dict = TFConvBertModelTester(self )
lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: List[str] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Dict = True
lowerCamelCase__ : Tuple = True
if hasattr(UpperCamelCase__ , """use_cache""" ):
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
for model_class in self.all_model_classes:
lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" )
lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ )
lowerCamelCase__ : Any = model(UpperCamelCase__ )
if self.is_encoder_decoder:
lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""]
lowerCamelCase__ : Any = outputs["""encoder_attentions"""]
else:
lowerCamelCase__ : int = outputs["""hidden_states"""]
lowerCamelCase__ : Optional[int] = outputs["""attentions"""]
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
self.assertEqual(out_len % 2 , 0 )
lowerCamelCase__ : Any = outputs.decoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCamelCase__: List[str] ):
lowerCamelCase__ : Any = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowerCamelCase__ : int = True
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
if self.is_encoder_decoder:
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_decoder_attentions_output(UpperCamelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
# Check attention is always last and order is fine
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
@require_tf
class _lowercase ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
lowerCamelCase__ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )[0]
lowerCamelCase__ : Dict = [1, 6, 768]
self.assertEqual(output.shape , UpperCamelCase__ )
lowerCamelCase__ : Dict = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 )
| 631 | 0 |
'''simple docstring'''
def print_max_activities(start, finish):
    """Greedy activity selection.

    Given per-activity start and finish times, with `finish` sorted in
    ascending order, print the indices of a maximum-size set of mutually
    compatible activities, comma-separated. Returns None (output is printed).
    """
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # Select activity j if it starts no earlier than the finish
        # time of the previously selected activity.
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Sample data: activity i occupies [start[i], finish[i]); finish times are
# sorted ascending, which the greedy selection above requires.
start = [1, 3, 0, 5, 8, 5]
finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 718 |
"""Package root: version constant and re-exports of the public `accelerate` API."""
_A : str ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)
# `rich`-based helpers are optional; only exposed when the dependency is installed.
if is_rich_available():
    from .utils import rich
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """A binary-tree node: payload `data` plus optional left/right children."""

    data: int
    left: "Node | None" = None
    right: "Node | None" = None
def make_tree():
    """Build the fixed sample tree used by the demo:

            1
           / \\
          2   3
         / \\
        4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root):
    """Root-left-right traversal; returns [] for an empty tree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def postorder(root):
    """Left-right-root traversal; returns [] for an empty tree."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def inorder(root):
    """Left-root-right traversal; returns [] for an empty tree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def height(root):
    """Number of nodes on the longest root-to-leaf path; 0 for an empty tree."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root):
    """Breadth-first traversal: node values level by level, left to right."""
    output = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root, level):
    """Values of the nodes at `level` (root is level 1), left to right."""
    output = []

    def populate_output(root, level):
        # Recursive helper: walk down `level - 1` steps, collecting data
        # from nodes that sit exactly on the requested level.
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root, level):
    """Values of the nodes at `level` (root is level 1), right to left."""
    output = []

    def populate_output(root, level):
        # Mirror of the left-to-right helper: visit the right subtree first.
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root):
    """Zig-zag level-order traversal: one list per level, alternating
    left-to-right and right-to-left, starting left-to-right at the root."""
    if root is None:
        return []
    output = []
    flag = 0  # 0 -> traverse this level left-to-right, 1 -> right-to-left
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main():  # Main function for testing.
    """Build the sample tree and print every supported traversal."""
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Print all traversals of the fixed sample tree.
    main()
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any =logging.get_logger(__name__)
# NOTE(review): `_A` is immediately rebound below, so the logger created on the
# previous line is discarded — these were presumably two distinct module names
# (a `logger` and the pretrained-config archive map); verify against upstream.
_A : Dict ={
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowercase(PretrainedConfig):
    """Configuration for the TrOCR text decoder.

    Holds the hyper-parameters of the causal transformer decoder
    (vocabulary size, model dimensions, dropout rates) and forwards the
    generation-related token ids to `PretrainedConfig`.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic HF attribute names onto this config's decoder-specific ones.
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        # Special-token / generation ids are handled by the base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 631 | 0 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    """Dataset reader for JSON / JSON-Lines files.

    Thin wrapper around the `Json` packaged builder: forwards the usual
    reader options to `AbstractDatasetReader` and materializes either a
    streaming or a regular (map-style) dataset in `read()`.
    """

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # `field` selects a top-level key of the JSON document to read rows from.
        self.field = field
        # Normalize a single path / list of paths into a {split: paths} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Return the dataset: streaming if requested, else prepared on disk."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    """Writes a `Dataset` to JSON Lines (or any pandas `to_json` layout),
    batch by batch, optionally in parallel with a process pool."""

    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        """Serialize the whole dataset; return the number of bytes written."""
        # `path_or_buf` must not leak into pandas' to_json kwargs.
        self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON Lines by default when orient="records".
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        """Serialize one slice of rows to encoded JSON bytes (picklable for Pool)."""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Write batches to `file_obj`, sequentially or via a process pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
| 720 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Return True if `graph` (adjacency list, vertices 0..n-1) is bipartite.

    Colors vertices with 0/1 via DFS, then verifies that no edge connects
    two vertices of the same color.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Color vertex v with c and recurse with the opposite color.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Cover every connected component.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    # An edge between same-colored endpoints means the graph is not bipartite.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph (vertex 4 is isolated).
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 631 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module logger; `logger` is referenced by the tokenizer's save_vocabulary.
logger = logging.get_logger(__name__)
# File names the tokenizer expects inside a pretrained checkpoint directory;
# the names below match what the class attributes reference.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

# Maximum input sizes (in tokens) for each pretrained Longformer checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4_096,
    "allenai/longformer-large-4096": 4_096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from every byte value (0-255) to a unicode character.

    Printable bytes map to themselves; the remaining bytes are shifted up to
    chr(256 + k) so the BPE vocabulary avoids whitespace/control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a sequence of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _lowercase(PreTrainedTokenizer):
    """Longformer tokenizer: byte-level BPE, identical in mechanics to RoBERTa's."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self: List[str] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any]="replace" , UpperCamelCase__: Optional[int]="<s>" , UpperCamelCase__: Optional[Any]="</s>" , UpperCamelCase__: List[str]="</s>" , UpperCamelCase__: List[str]="<s>" , UpperCamelCase__: int="<unk>" , UpperCamelCase__: List[Any]="<pad>" , UpperCamelCase__: Optional[int]="<mask>" , UpperCamelCase__: List[str]=False , **UpperCamelCase__: str , ):
lowerCamelCase__ : str = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
lowerCamelCase__ : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
lowerCamelCase__ : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
lowerCamelCase__ : int = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
lowerCamelCase__ : str = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
lowerCamelCase__ : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : Optional[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
errors=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , **_a , )
with open(_a , encoding="""utf-8""" ) as vocab_handle:
lowerCamelCase__ : Any = json.load(_a )
lowerCamelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding
lowerCamelCase__ : Union[str, Any] = bytes_to_unicode()
lowerCamelCase__ : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(_a , encoding="""utf-8""" ) as merges_handle:
lowerCamelCase__ : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase__ : Dict = dict(zip(_a , range(len(_a ) ) ) )
lowerCamelCase__ : Optional[Any] = {}
lowerCamelCase__ : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase__ : Dict = re.compile(R"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def lowerCamelCase_ ( self: Any ):
return len(self.encoder )
def lowerCamelCase_ ( self: Tuple ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple ):
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : str = tuple(_a )
lowerCamelCase__ : Optional[int] = get_pairs(_a )
if not pairs:
return token
while True:
lowerCamelCase__ : List[str] = min(_a , key=lambda UpperCamelCase__ : self.bpe_ranks.get(_a , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ : Dict = bigram
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Optional[int] = 0
while i < len(_a ):
try:
lowerCamelCase__ : int = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : str = tuple(_a )
lowerCamelCase__ : List[Any] = new_word
if len(_a ) == 1:
break
else:
lowerCamelCase__ : Optional[Any] = get_pairs(_a )
lowerCamelCase__ : List[str] = """ """.join(_a )
lowerCamelCase__ : Optional[int] = word
return word
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str ):
lowerCamelCase__ : Any = []
for token in re.findall(self.pat , _a ):
lowerCamelCase__ : Optional[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def lowerCamelCase_ ( self: str , UpperCamelCase__: List[Any] ):
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: str ):
return self.decoder.get(_a )
def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
lowerCamelCase__ : Union[str, Any] = """""".join(_a )
lowerCamelCase__ : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Any , UpperCamelCase__: List[str] = None ):
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : Optional[int] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase__ : Optional[Any] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + """\n""" )
lowerCamelCase__ : List[str] = 0
with open(_a , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
lowerCamelCase__ : str = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: List[Any] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : str = [self.cls_token_id]
lowerCamelCase__ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self: int , UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] = None , UpperCamelCase__: Any = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: int , UpperCamelCase__: int = None ):
lowerCamelCase__ : str = [self.sep_token_id]
lowerCamelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Optional[int]=False , **UpperCamelCase__: Optional[Any] ):
lowerCamelCase__ : str = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
lowerCamelCase__ : Any = """ """ + text
return (text, kwargs)
| 721 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase ( _lowercase ):
def __init__( self: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : Optional[int] = dataset
lowerCamelCase__ : Optional[int] = process
lowerCamelCase__ : List[str] = params
def __len__( self: List[str] ):
return len(self.dataset )
def __getitem__( self: Any , UpperCamelCase__: int ):
lowerCamelCase__ : Dict = self.dataset[i]
lowerCamelCase__ : Union[str, Any] = self.process(UpperCamelCase__ , **self.params )
return processed
class _lowercase ( _lowercase ):
def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ):
lowerCamelCase__ : int = loader
lowerCamelCase__ : str = infer
lowerCamelCase__ : Optional[int] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : int = loader_batch_size
# Internal bookkeeping
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Optional[Any] = None
def __len__( self: Dict ):
return len(self.loader )
def __iter__( self: Optional[int] ):
lowerCamelCase__ : List[Any] = iter(self.loader )
return self
def lowerCamelCase_ ( self: Any ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
lowerCamelCase__ : str = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
lowerCamelCase__ : int = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
lowerCamelCase__ : str = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
lowerCamelCase__ : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowerCamelCase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
lowerCamelCase__ : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
lowerCamelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
lowerCamelCase__ : List[str] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
lowerCamelCase__ : Optional[Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
lowerCamelCase__ : int = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
lowerCamelCase__ : str = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
lowerCamelCase__ : Optional[int] = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def lowerCamelCase_ ( self: List[Any] ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
lowerCamelCase__ : Optional[Any] = next(self.iterator )
lowerCamelCase__ : List[str] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
lowerCamelCase__ : Optional[Any] = processed
else:
lowerCamelCase__ : Union[str, Any] = list(processed.keys() )[0]
lowerCamelCase__ : Any = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase__ : Any = len(UpperCamelCase__ )
else:
lowerCamelCase__ : List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCamelCase__ : Union[str, Any] = observed_batch_size
# Setting internal index to unwrap the batch
lowerCamelCase__ : List[Any] = processed
lowerCamelCase__ : List[Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _lowercase ( _lowercase ):
    """Iterator that flattens an ``infer`` which yields several items per input.

    Each loader element produces a sub-iterator; its items are yielded one by
    one before the next loader element is consumed. Another way to look at it:
    we're flattening lists of lists into a single list, but with generators.
    """

    def __init__( self, loader, infer, params, loader_batch_size=None ):
        # loader_batch_size is accepted for signature compatibility but the
        # parent is initialized without it (chunk streams are not unrolled here).
        super().__init__(loader , infer , params )

    def __iter__( self ):
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self

    def __next__( self ):
        if self.subiterator is None:
            # First call: build the sub-iterator for the first loader item.
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return the next item of the current sub-iterator.
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, move on to the next loader
            # element. This keeps feeding until ALL elements of the loader
            # have created their sub-iterator and been iterated through.
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
class _lowercase ( _lowercase ):
    """Iterator that regroups a flattened stream back into ``process`` chunks.

    Extremely similar to PipelineIterator in its unpacking mechanism, BUT each
    item must carry an ``is_last`` flag: items are accumulated (possibly while
    unbatching) until ``is_last`` is hit, and the whole accumulator is returned
    so that ``process`` and ``postprocess`` see the same boundaries.
    """

    def __iter__( self ):
        self.iterator = iter(self.loader )
        return self

    def __next__( self ):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # Finish unrolling the batch we are currently inside of.
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last" )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                # Try to infer the actual size of this batch.
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # Could be the last (short) batch: don't unroll past its end.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last" )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last" )
                accumulator.append(item )
        return accumulator
class _lowercase ( _lowercase ):
def __init__( self: Optional[int] , UpperCamelCase__: Dataset , UpperCamelCase__: str ):
lowerCamelCase__ : Union[str, Any] = dataset
lowerCamelCase__ : str = key
def __len__( self: Optional[Any] ):
return len(self.dataset )
def __getitem__( self: List[str] , UpperCamelCase__: Any ):
return self.dataset[i][self.key]
class _lowercase ( _lowercase ):
def __init__( self: Optional[int] , UpperCamelCase__: Dataset , UpperCamelCase__: str , UpperCamelCase__: str ):
lowerCamelCase__ : str = dataset
lowerCamelCase__ : Dict = keya
lowerCamelCase__ : List[str] = keya
def __len__( self: str ):
return len(self.dataset )
def __getitem__( self: List[str] , UpperCamelCase__: Union[str, Any] ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 631 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module-level logger.
# NOTE(review): `_A` is immediately rebound by the dict below, so the logger
# binding is lost — these were presumably distinct names upstream (a logger
# and a *_PRETRAINED_CONFIG_ARCHIVE_MAP); verify against the original module.
_A : Optional[Any] =logging.get_logger(__name__)
# Checkpoint-name -> config-URL map for pretrained VAN models.
_A : Optional[Any] ={
    """Visual-Attention-Network/van-base""": (
        """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
    ),
}
class _lowercase ( PretrainedConfig ):
    """Configuration for a VAN (Visual Attention Network) model.

    Stores the architecture hyper-parameters; any extra keyword arguments are
    forwarded to ``PretrainedConfig``. The original ``__init__`` declared every
    parameter with the same name (a SyntaxError) and bound the values to
    throwaway locals; the parameter names are restored from the right-hand
    sides of the original assignments.
    """

    # Model-type tag (scrambled attribute name kept for interface stability;
    # upstream configs call this ``model_type`` — verify before renaming).
    a = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        # NOTE: the list defaults are shared across calls; configs only read
        # them, matching the convention of this config family.
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 700 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys

# Quiet TensorFlow's C++ logging in case TF gets imported indirectly.
# (The original bound the bare string '3' to a throwaway variable with an
# undefined `Dict` annotation — a NameError at import time; setting
# TF_CPP_MIN_LOG_LEVEL is the only use `os` has in this script.)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

# Torch details are optional: report None when torch is not installed.
try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

# transformers details are optional as well.
try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
| 631 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowercase ( ProcessorMixin ):
    """Wraps a CLAP feature extractor and a Roberta tokenizer into a single
    processor: text goes to the tokenizer, audio to the feature extractor.
    """

    # Class attributes read by ProcessorMixin to build the two sub-objects.
    # (The original bound both to the same name ``a``, so the first was lost;
    # the ProcessorMixin attribute names are restored.)
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__( self, feature_extractor, tokenizer ):
        super().__init__(feature_extractor , tokenizer )

    def __call__( self, text=None, audios=None, return_tensors=None, **kwargs ):
        """Tokenize ``text`` and/or extract features from ``audios``.

        Returns the tokenizer encoding (with ``input_features`` merged in when
        audio is also given), or a ``BatchEncoding`` of audio features only.
        Raises ``ValueError`` when neither input is provided.
        """
        sampling_rate = kwargs.pop("sampling_rate" , None )
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            # Merge the audio features into the text encoding. (The original
            # dropped this assignment into a throwaway local, so the returned
            # encoding never contained the audio features.)
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    def batch_decode( self, *args, **kwargs ):
        # Forward to the tokenizer.
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self, *args, **kwargs ):
        # Forward to the tokenizer.
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        # Union of both components' input names, order-preserving de-dup.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 701 |
'''simple docstring'''
# Lazy-import scaffolding for the TrOCR package: heavy torch-backed modules
# are only materialized on first attribute access via _LazyModule.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)
# Import structure: config + processor are always importable.
# NOTE(review): `_A` is rebound twice below and the _LazyModule call references
# an undefined `_import_structure` — presumably these were all one name
# (`_import_structure`) before scrambling; verify against the upstream module.
_A : Any ={
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}
# Modeling entries are only advertised when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : str =[
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    # At runtime, replace this module with a lazily-loading proxy.
    import sys
    _A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class _lowercase :
def __init__( self: str , UpperCamelCase__: Tuple ):
lowerCamelCase__ : Union[str, Any] = value
lowerCamelCase__ : Node | None = None
lowerCamelCase__ : Node | None = None
class _lowercase :
def __init__( self: List[Any] , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = tree
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: str ):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self: Tuple ):
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 702 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): `_A` is immediately rebound by the dict below, losing the
# logger binding — presumably distinct names upstream; verify.
_A : Union[str, Any] =logging.get_logger(__name__)
# Checkpoint-name -> config-URL map for pretrained AST models.
_A : List[str] ={
    '''MIT/ast-finetuned-audioset-10-10-0.4593''': (
        '''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
    ),
}
class _lowercase ( PretrainedConfig ):
    """Configuration for the Audio Spectrogram Transformer (AST) model.

    The original ``__init__`` declared every parameter with the same name
    (a SyntaxError) and bound the values to throwaway locals; parameter names
    are restored from the right-hand sides of the original assignments, and
    extra keyword arguments are forwarded to ``PretrainedConfig``.
    """

    # Model-type tag (scrambled attribute name kept for interface stability;
    # upstream configs call this ``model_type`` — verify before renaming).
    a = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 631 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowercase :
    """Builds Swinv2 configs and dummy inputs for the model tests below.

    The original ``__init__`` declared every parameter with the same name
    (a SyntaxError) and bound values to throwaway locals; parameter and
    method names are restored from the right-hand sides of the original
    assignments and from the ``self.model_tester.*`` call sites in the
    test class that follows.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        # Random pixel values plus (optionally) classification labels.
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # NOTE(review): kwarg name `path_norm` was in the original and
            # looks like a typo for `patch_norm`; kept as-is to preserve the
            # original behavior — verify against SwinvaConfig's signature.
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    # Model classes under test (empty tuple when torch is unavailable).
    a = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline test mixin.
    # NOTE(review): every class attribute here is named `a`, so each rebinding
    # shadows the previous one — upstream presumably used distinct names
    # (e.g. all_model_classes, pipeline_model_mapping, test_* flags); verify.
    a = (
        {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    a = False
    a = False
    a = False
    a = False
    def lowerCamelCase_ ( self: Tuple ):
        # setUp: instantiate the model tester and a ConfigTester for SwinvaConfig.
        # NOTE(review): all methods below share the scrambled name
        # `lowerCamelCase_`, so each def shadows the previous — verify upstream names.
        lowerCamelCase__ : Optional[Any] = SwinvaModelTester(self )
        lowerCamelCase__ : int = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 )
    def lowerCamelCase_ ( self: List[Any] ):
        # Run the standard battery of config serialization / round-trip checks.
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def lowerCamelCase_ ( self: List[str] ):
        # Basic forward-pass check of the bare Swinv2 model.
        lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
    def lowerCamelCase_ ( self: Any ):
        pass
    @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: Dict ):
        pass
    def lowerCamelCase_ ( self: Tuple ):
        # Input embeddings must be an nn.Module; output embeddings absent or Linear.
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase__ : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
    def lowerCamelCase_ ( self: Optional[int] ):
        # The forward signature's first argument must be `pixel_values`.
        lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCamelCase__ : Dict = ['pixel_values']
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Validate attention outputs: count, window-sized shapes, and that the
        # attentions come last when all outputs are requested together.
        lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Optional[int] = True
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Tuple = True
            lowerCamelCase__ : str = False
            lowerCamelCase__ : List[Any] = True
            lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Dict = outputs.attentions
            lowerCamelCase__ : Dict = len(self.model_tester.depths )
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ : List[str] = True
            lowerCamelCase__ : Optional[int] = config.window_size**2
            lowerCamelCase__ : str = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[Any] = outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # Per-layer attention shape: (num_heads, window², window²).
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            lowerCamelCase__ : Tuple = len(UpperCamelCase__ )
            # Check attention is always last and order is fine
            lowerCamelCase__ : str = True
            lowerCamelCase__ : Union[str, Any] = True
            lowerCamelCase__ : str = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : List[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            if hasattr(self.model_tester , """num_hidden_states_types""" ):
                lowerCamelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                lowerCamelCase__ : Any = 2
            self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
            lowerCamelCase__ : Dict = outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def lowerCamelCase_ ( self: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] ):
        # Helper: run one model and validate hidden_states and
        # reshaped_hidden_states shapes against the patch grid.
        lowerCamelCase__ : int = model_class(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        with torch.no_grad():
            lowerCamelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
        lowerCamelCase__ : Optional[Any] = outputs.hidden_states
        lowerCamelCase__ : str = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
        # Swinv2 has a different seq_length
        lowerCamelCase__ : int = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCamelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        lowerCamelCase__ : Union[str, Any] = outputs.reshaped_hidden_states
        self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
        # Flatten the (B, C, H, W) reshaped states back to (B, H*W, C) to compare.
        lowerCamelCase__ : Any = reshaped_hidden_states[0].shape
        lowerCamelCase__ : List[str] = (
            reshaped_hidden_states[0].view(UpperCamelCase__ , UpperCamelCase__ , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : List[str] = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Any = 3
lowerCamelCase__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase__ : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCamelCase__ : str = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Any = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[str] = SwinvaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[int] = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
lowerCamelCase__ : int = model_class(config=UpperCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _lowercase(unittest.TestCase):
    """Integration test: run a pretrained Swinv2 classifier on a COCO fixture image."""

    @cached_property
    def default_image_processor(self):
        # Restored name: the test below reads `self.default_image_processor`.
        return (
            AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3_947, -0.4_306, 0.0_026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 703 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Root of the example scripts whose pinned version must track releases.
PATH_TO_EXAMPLES = '''examples/'''
# For each file kind: (regex locating the version string, replacement template).
# Restored names: the functions below reference REPLACE_PATTERNS/REPLACE_FILES/
# PATH_TO_EXAMPLES/README_FILE, which were all clobbered as `_A`.
REPLACE_PATTERNS = {
    '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
    '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
# Files (outside the examples tree) that embed the version string.
REPLACE_FILES = {
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex registered for `pattern`.

    Restored name and distinct parameters: the original definition repeated one
    parameter name (a SyntaxError) and call sites in this file already use
    `update_version_in_file`.
    """
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""", version)
    code = re_pattern.sub(replace, code)
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(code)
def update_version_in_examples(version):
    """Pin `version` in every Python example script under PATH_TO_EXAMPLES.

    Fixed: the obfuscated version walked the *version argument* instead of the
    examples directory.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded; skip examples for patch releases.

    Restored name and parameters: the caller invokes
    `global_version_update(version, patch=...)` by keyword.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point model-doc links in the README at the stable docs instead of `main`.

    Fixed: the prompts and the README path were assigned to throwaway names
    while the loop conditions referenced `_start_prompt`/`_end_prompt`.
    """
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""",
                """https://huggingface.co/docs/transformers/model_doc""",
            )
        index += 1
    with open(README_FILE, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)
def get_version():
    """Read the current version out of the package `__init__.py`.

    Fixed: the original passed an undefined name to `packaging.version.parse`.
    """
    with open(REPLACE_FILES["""init"""], """r""") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the pre-release steps: bump the version and clean the README.

    Restored name and the `patch` parameter, which the body already referenced.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = f'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(f'''Which version are you releasing? [{default_version}]''')
    if len(version) == 0:
        version = default_version
    print(f'''Updating version to {version}.''')
    global_version_update(version, patch=patch)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""")
        clean_main_ref_in_model_list()
def post_release_work():
    """After a release: move the version to the next `.dev0` and clean the README."""
    current_version = get_version()
    dev_version = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'''Which version are we developing now? [{dev_version}]''')
    if len(version) == 0:
        version = dev_version
    print(f'''Updating version to {version}.''')
    global_version_update(version)
    print("""Cleaning main README, don't forget to run `make fix-copies`.""")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Restored names: the parsed parser/args were assigned to `_A` while the
    # following lines referenced `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 631 | 0 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants

# Restored name: the built-in-voltage computation below reads `T`.
T = 300  # TEMPERATURE (unit = K)
def SCREAMING_SNAKE_CASE_(
    donor_conc: float,
    acceptor_conc: float,
    intrinsic_conc: float,
    temperature: float = 300,
) -> float:
    """Return the built-in voltage (in volts) of a p-n junction.

    V_bi = (k * T / q) * ln(N_d * N_a / n_i^2)

    The original definition repeated one parameter name (a SyntaxError); the
    restored names match the body. `temperature` is a new, backward-compatible
    parameter defaulting to the module-wide 300 K.

    Args:
        donor_conc: donor concentration N_d.
        acceptor_conc: acceptor concentration N_a.
        intrinsic_conc: intrinsic carrier concentration n_i.
        temperature: junction temperature in kelvin.

    Raises:
        ValueError: if a concentration is non-positive, or N_d/N_a is not
            greater than n_i.
    """
    if donor_conc <= 0:
        raise ValueError("""Donor concentration should be positive""")
    elif acceptor_conc <= 0:
        raise ValueError("""Acceptor concentration should be positive""")
    elif intrinsic_conc <= 0:
        raise ValueError("""Intrinsic concentration should be positive""")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            """Donor concentration should be greater than intrinsic concentration""")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            """Acceptor concentration should be greater than intrinsic concentration""")
    else:
        return (
            Boltzmann
            * temperature
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 704 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_A : Union[str, Any] =False
class _lowercase(unittest.TestCase):
    """Check that one optimizer step under DDPM and DDIM noise schedules matches."""

    def get_model_optimizer(self, resolution=32):
        """Build a small UNet and SGD optimizer with a fixed seed.

        Restored name and the `resolution` parameter: the test below calls
        `self.get_model_optimizer(resolution=32)` by keyword.
        """
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0_001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0_001,
            beta_end=0.02,
            beta_schedule="""linear""",
            clip_sample=True,  # NOTE(review): value obfuscated in the original — confirm
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0_001,
            beta_end=0.02,
            beta_schedule="""linear""",
            clip_sample=True,  # NOTE(review): value obfuscated in the original — confirm
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 631 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Restored name: the config class below calls `logger.info(...)`.
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/conditional-detr-resnet-50''': (
        '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
    ),
}
class _lowercase(PretrainedConfig):
    """Configuration for the Conditional DETR model.

    Base class restored to `PretrainedConfig` (imported above); the original
    referenced an undefined name. The `__init__` parameters repeated a single
    obfuscated name (a SyntaxError) and were restored to the names the body
    assigns to instance attributes.
    """

    model_type = """conditional_detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can\'t specify both `backbone_config` and `use_timm_backbone`.""")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("""model_type""")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Mirrors the `attribute_map` alias above.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Mirrors the `attribute_map` alias above.
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a dict, inlining the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class _lowercase(OnnxConfig):
    """ONNX export configuration for Conditional DETR.

    Base class restored to `OnnxConfig` (imported above); property names follow
    the `OnnxConfig` contract (`inputs`, `atol_for_validation`,
    `default_onnx_opset`).
    """

    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self):
        # Dynamic axes for the two model inputs.
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ]
        )

    @property
    def atol_for_validation(self):
        # Tolerance for comparing ONNX and PyTorch outputs.
        return 1e-5

    @property
    def default_onnx_opset(self):
        return 12
| 705 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process) -> list:
    """Highest-Response-Ratio-Next scheduling: per-process turn-around times.

    Restored name and distinct parameters: the original repeated one parameter
    name (a SyntaxError) and the main block calls `calculate_turn_around_time`.
    Note: `arrival_time` is sorted in place.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process) -> list:
    """Waiting time = turn-around time - burst time, per process.

    `process_name` is unused but kept for signature compatibility with the
    call site in the main block.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Restored names: the original assigned every constant to `_A` while the
    # calls and prints below reference the real names.
    no_of_process = 5
    process_name = ['''A''', '''B''', '''C''', '''D''', '''E''']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
    for i in range(0, no_of_process):
        print(
            F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
            F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
        )
    print(F'average waiting time : {mean(waiting_time):.5f}')
    print(F'average turn around time : {mean(turn_around_time):.5f}')
| 631 | 0 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# Restored names: the dataset below logs via `logger`, and the training
# arguments reference `MODEL_TYPES`; all three were clobbered as `_A`.
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """Arguments controlling SQuAD data loading and feature conversion.

    Field and class names restored: every field was obfuscated to `a`
    (silently clobbering each other), while the dataset class below reads
    `args.model_type`, `args.data_dir`, `args.max_seq_length`, etc., and
    annotates with `SquadDataTrainingArguments`.
    """

    model_type: str = field(
        default=None, metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached training and evaluation sets"""}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""}
    )
    n_best_size: int = field(
        default=20, metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""}
    )
    lang_id: int = field(
        default=0,
        metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        },
    )
    threads: int = field(default=1, metadata={"""help""": """multiple threads for converting example to features"""})
class Split(Enum):
    """Dataset split selector; the dataset below looks splits up via `Split[mode]`."""

    train = """train"""
    dev = """dev"""
class _lowercase(Dataset):
    """PyTorch dataset wrapping SQuAD features, with on-disk caching.

    Base class restored to `Dataset` (imported above). The `__init__` parameters
    repeated one obfuscated name (a SyntaxError); they were restored to the
    names the body reads.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        # NOTE(review): the import above only provides `SquadVaProcessor`; the
        # upstream code distinguishes v1/v2 processors here — confirm.
        self.processor = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = """v2""" if args.version_2_with_negative else """v1"""
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''',
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["""features"""]
                self.dataset = self.old_features.get("""dataset""", None)
                self.examples = self.old_features.get("""examples""", None)
                logger.info(
                    F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        """ future run"""
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]'''
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": attention_mask,
            """token_type_ids""": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"""is_impossible""": is_impossible})
            if self.is_language_sensitive:
                # `torch.intaa` was an obfuscation of `torch.int64`.
                inputs.update({"""langs""": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions})
        return inputs
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
# Import the VQ-Diffusion pipeline only when both torch and transformers are
# installed, so this package can be imported without the optional dependencies.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE_(n: int) -> list[int]:
    """Return the prime factorization of `n` in non-decreasing order.

    The parameter was obfuscated while the body already referenced `n`, so
    restoring the name `n` is the minimal fix. Returns [] for n < 2.
    """
    i = 2
    factors = []
    # Trial division: only divisors up to sqrt(n) need checking.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 707 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    """Builds tiny ViTMAE configs/inputs for the unit tests.

    Class name restored: the test case below instantiates
    `ViTMAEModelTester(self)`. The `__init__` parameters repeated one
    obfuscated name (a SyntaxError) and were restored to the names the body
    assigns.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,  # NOTE(review): value obfuscated in the original — confirm
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model tests for ViTMAE.

    NOTE(review): the obfuscated original inherited the same base class twice
    (a TypeError at class-creation time) and gave every test method the same
    name, so only the last definition survived. The mixin bases, class
    attributes and test-method names below are reconstructed from the upstream
    ViTMAE tests — confirm that ModelTesterMixin and PipelineTesterMixin are
    imported at the top of this file.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp( self ):
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass

    def test_model_common_attributes( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def check_pt_tf_models( self , tf_model , pt_model , pt_inputs_dict ):
        # make masks reproducible
        np.random.seed(2 )
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        pt_noise = torch.from_numpy(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["""noise"""] = pt_noise
        super().check_pt_tf_models(tf_model , pt_model , pt_inputs_dict )

    def test_save_load( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            out_before = outputs[0].cpu().numpy()
            # zero out NaNs so the element-wise comparison below is meaningful
            out_before[np.isnan(out_before )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                # Make sure we don't have nans
                out_after = after_outputs[0].cpu().numpy()
                out_after[np.isnan(out_after )] = 0
                # BUG FIX: the original computed np.abs(out_a - out_a), which is
                # identically zero; compare pre- and post-reload outputs instead.
                max_diff = np.amax(np.abs(out_after - out_before ) )
                self.assertLessEqual(max_diff , 1e-5 )

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def test_determinism( self ):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def test_save_load_fast_init_from_base( self ):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def test_save_load_fast_init_to_base( self ):
        pass

    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def test_model_outputs_equivalence( self ):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small( self ):
        pass

    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            # BUG FIX: the original passed an undefined name to from_pretrained.
            model = ViTMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
    """Load and return the standard COCO fixture image used by the integration tests."""
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Integration test running the pretrained facebook/vit-mae-base checkpoint.

    NOTE(review): assumes `torch_device` is imported from
    transformers.testing_utils at the top of this file — confirm.
    """

    @cached_property
    def default_image_processor( self ):
        # BUG FIX: property renamed to match its use (`self.default_image_processor`)
        # in the test below.
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None

    @slow
    def test_inference_for_pretraining( self ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(torch_device )
        image_processor = self.default_image_processor
        # the image helper in this file is (mis)named SCREAMING_SNAKE_CASE_
        image = SCREAMING_SNAKE_CASE_()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1e-4 ) )
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester :
    """Builds a tiny TransfoXL config plus inputs and runs shape checks on the TF models.

    NOTE(review): the class, method and attribute names are reconstructed from
    the call sites in the test class below; the obfuscated original bound every
    value to a throw-away local, leaving all `self.*` attributes unset.
    """

    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        # attention key length = current tokens + cached memory tokens
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs( self ):
        # BUG FIX: build two *distinct* input batches; the obfuscation collapsed
        # both names so the same tensor was returned twice.
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed( self ):
        random.seed(self.seed )
        tf.random.set_seed(self.seed )

    def create_and_check_transfo_xl_model( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLModel(config )
        hidden_states_1, mems_1 = model(input_ids_1 ).to_tuple()
        # feed the memories of the first pass back in with the second batch
        inputs = {"""input_ids""": input_ids_2, """mems""": mems_1}
        hidden_states_2, mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_2.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def create_and_check_transfo_xl_lm_head( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_1, mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {"""input_ids""": input_ids_1, """labels""": lm_labels}
        _, mems_1 = model(inputs ).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1] ).to_tuple()
        inputs = {"""input_ids""": input_ids_2, """mems""": mems_1, """labels""": lm_labels}
        _, mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_2.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def create_and_check_transfo_xl_for_sequence_classification( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_1 )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids_1}
        return config, inputs_dict
@require_tf
class _lowercase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common TF model/pipeline tests for the TransfoXL architectures.

    NOTE(review): the mixin bases (imported at the top of this file) and the
    test-method names were reconstructed; the obfuscated original inherited the
    same class twice and gave every method the same name.
    """

    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFTransfoXLModel,
            """text-classification""": TFTransfoXLForSequenceClassification,
            """text-generation""": TFTransfoXLLMHeadModel,
            """zero-shot""": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def setUp( self ):
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model( self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )

    def test_transfo_xl_lm_head( self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )

    def test_transfo_xl_sequence_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )

    def test_model_common_attributes( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # BUG FIX: this list was assigned to a throw-away name while the loop
        # below referenced `list_other_models_with_output_ebd`.
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode( self ):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            # BUG FIX: the original passed an undefined name to from_pretrained.
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
    def test_dataset_conversion( self ):
        pass
@require_tf
class _lowercase ( unittest.TestCase ):
    """Slow integration test: greedy generation from the pretrained wt103 checkpoint."""

    @unittest.skip("""Skip test until #12651 is resolved.""" )
    @slow
    def test_lm_generate_transfo_xl_wt103( self ):
        model = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
        # fmt: off
        # BUG FIX: `tf.intaa` is not a TensorFlow dtype; token ids fit in int32.
        input_ids = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        expected_output_ids = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        # BUG FIX: pass the prepared ids and disable sampling so greedy decoding
        # deterministically matches `expected_output_ids`.
        output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
| 708 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase ( AbstractArchiveFileSystem ):
    """Read-only filesystem that exposes one compressed file as a single
    uncompressed file at the archive root.

    NOTE(review): the class attributes were all collapsed to `a` by the
    obfuscation; their fsspec names are restored (``compression`` is read by
    ``__init__`` below, ``protocol``/``root_marker`` by fsspec itself). The base
    class is ``AbstractArchiveFileSystem``, imported at the top of this file.
    """

    root_marker = """"""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__( self , fo: str = "" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , **kwargs ):
        """
        fo: URL/path of the compressed file (possibly chained with ``::``).
        target_protocol / target_options: protocol and options used to open ``fo`` itself.
        """
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode="""rb""" , protocol=target_protocol , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split("""::""" )[0] )
        # name of the virtual uncompressed file: compressed name minus its extension
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(""".""" )]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol( cls , path ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip("""/""" )

    def _get_dirs( self ):
        # lazily build the single-entry directory cache for the uncompressed file
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
            self.dir_cache = {f["""name"""]: f}

    def cat( self , path: str ):
        # return the whole decompressed content at once
        return self.file.open().read()

    def _open( self , path: str , mode: str = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class _lowercase ( _lowercase ):
    # BUG FIX: restore the three distinct fsspec attribute names (the original
    # bound all values to the same name `a`).
    protocol = """bz2"""
    compression = """bz2"""
    extension = """.bz2"""
class _lowercase ( _lowercase ):
    # BUG FIX: restore the three distinct fsspec attribute names (the original
    # bound all values to the same name `a`).
    protocol = """gzip"""
    compression = """gzip"""
    extension = """.gz"""
class _lowercase ( _lowercase ):
    # BUG FIX: restore the three distinct fsspec attribute names (the original
    # bound all values to the same name `a`).
    protocol = """lz4"""
    compression = """lz4"""
    extension = """.lz4"""
class _lowercase ( _lowercase ):
    # BUG FIX: restore the three distinct fsspec attribute names (the original
    # bound all values to the same name `a`).
    protocol = """xz"""
    compression = """xz"""
    extension = """.xz"""
class _lowercase ( _lowercase ):
    # BUG FIX: restore the three distinct fsspec attribute names (the original
    # bound all values to the same name `a`).
    protocol = """zstd"""
    compression = """zstd"""
    extension = """.zst"""

    def __init__( self , fo: str , mode: str = "rb" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , block_size: int = DEFAULT_BLOCK_SIZE , **kwargs , ):
        """BUG FIX: the original declared duplicate parameter names (a SyntaxError)
        and lost the `_enter` / `self.file.__enter__` bindings below."""
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Proxy delegating everything to the wrapped file object, with a
            context-manager __enter__ that returns the proxy itself."""

            def __init__( self , file_ ):
                self._file = file_

            def __enter__( self ):
                self._file.__enter__()
                return self

            def __exit__( self , *args , **kwargs ):
                self._file.__exit__(*args , **kwargs )

            def __iter__( self ):
                return iter(self._file )

            def __next__( self ):
                return next(self._file )

            def __getattr__( self , attr ):
                return getattr(self._file , attr )

        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )

        self.file.__enter__ = fixed_enter
| 631 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester :
    """Tiny-config tester for EsmForProteinFolding.

    NOTE(review): the class name is restored from its call site in the test
    class below; parameter/attribute/method names were reconstructed because the
    obfuscated original declared duplicate parameter names (a SyntaxError) and
    never set the `self.*` attributes it later read.
    """

    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        seq_length=7 ,
        is_training=False ,
        use_input_mask=True ,
        use_token_type_ids=False ,
        use_labels=False ,
        vocab_size=19 ,
        hidden_size=32 ,
        num_hidden_layers=5 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="""gelu""" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=16 ,
        type_sequence_label_size=2 ,
        initializer_range=0.02 ,
        num_labels=3 ,
        num_choices=4 ,
        scope=None ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        # BUG FIX: the original passed an undefined name for is_folding_model;
        # EsmForProteinFolding requires a folding-model config, i.e. True.
        config = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , )
        return config

    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForProteinFolding(config=config ).float()
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        # positions/angles carry a leading dimension for the structure-module iterations
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model tests for ESMFold.

    BUG FIX: the original bases referenced the undefined name `lowerCAmelCase__`;
    the mixins are imported at the top of this file. NOTE(review): the
    test-method names below are reconstructed from the skip reasons and the
    upstream ESMFold tests — confirm against the original file.
    """

    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp( self ):
        self.model_tester = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    @unittest.skip("""Does not support attention outputs""" )
    def test_attention_outputs( self ):
        pass

    @unittest.skip
    def test_correct_missing_keys( self ):
        pass

    @unittest.skip("""Esm does not support embedding resizing""" )
    def test_resize_embeddings_untied( self ):
        pass

    @unittest.skip("""Esm does not support embedding resizing""" )
    def test_resize_tokens_embeddings( self ):
        pass

    @unittest.skip("""ESMFold does not support passing input embeds!""" )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip("""ESMFold does not support head pruning.""" )
    def test_head_pruning( self ):
        pass

    @unittest.skip("""ESMFold does not support head pruning.""" )
    def test_head_pruning_integration( self ):
        pass

    @unittest.skip("""ESMFold does not support head pruning.""" )
    def test_head_pruning_save_load_from_config_init( self ):
        pass

    @unittest.skip("""ESMFold does not support head pruning.""" )
    def test_head_pruning_save_load_from_pretrained( self ):
        pass

    @unittest.skip("""ESMFold does not support head pruning.""" )
    def test_headmasking( self ):
        pass

    @unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
    def test_hidden_states_output( self ):
        pass

    @unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
    def test_retain_grad_hidden_states_attentions( self ):
        pass

    @unittest.skip("""ESMFold only has one output format.""" )
    def test_model_outputs_equivalence( self ):
        pass

    @unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
    def test_save_load_fast_init_from_base( self ):
        pass

    @unittest.skip("""ESMFold does not support input chunking.""" )
    def test_feed_forward_chunking( self ):
        pass

    @unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
    def test_initialization( self ):
        pass

    @unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
    def test_torchscript_output_attentions( self ):
        pass

    @unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
    def test_torchscript_output_hidden_state( self ):
        pass

    @unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
    def test_torchscript_simple( self ):
        pass

    @unittest.skip("""ESMFold doesn't support data parallel.""" )
    def test_multi_gpu_data_parallel_forward( self ):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small( self ):
        pass
@require_torch
class _lowercase ( lowerCAmelCase__ ):
    """Slow integration test: run ESMFold end-to-end on a short tokenised
    sequence and compare one predicted atom position to a reference value.

    NOTE(review): the base class ``lowerCAmelCase__`` is an undefined mangled
    name (presumably ``unittest.TestCase``); left untouched because the true
    name is not visible in this file.
    """

    @slow
    def lowerCamelCase_ ( self: Dict ):
        # Full-precision CPU model for a deterministic numeric comparison.
        model = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
        model.eval()
        # Short tokenised protein sequence (BOS ... EOS).
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        # Fix: the original passed the undefined name ``_SCREAMING_SNAKE_CASE``
        # to the model; the tensor built above is the intended input.
        position_outputs = model(input_ids )["""positions"""]
        # Fix: ``torch.floataa`` is not a dtype — the digit-mangled original of
        # ``torch.float64``.
        expected_slice = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.float64 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1e-4 ) )
| 709 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
# Module-level logging setup for the conversion script.
# NOTE(review): the logger is bound to the mangled name ``_A``; later code in
# this file refers to ``logger`` — likely the original name.
logging.set_verbosity_info()
_A : int =logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    """Load the original (cvnets) YAML config and flatten it onto a Namespace.

    Nested mappings are flattened with dotted keys, e.g.
    ``model.classification.name``, so values are later read back with
    ``getattr(config, "model.classification.name", default)``.

    Fix: the function name was mangled to ``SCREAMING_SNAKE_CASE_`` although the
    call site below uses ``load_orig_config_file``; body locals were undefined.
    """
    print("""Loading config file...""" )

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten a nested mapping into {dotted_key: leaf_value}.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a MobileViTVaConfig for ``task_name`` from the original YAML
    config plus the matching id2label mapping downloaded from the hub.

    Fix: the original signature declared two parameters with the same mangled
    name (a SyntaxError), and all config fields were bound to a throwaway
    local instead of being set on ``config``.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False

    # dataset: pick label count, input resolution and the label-map filename.
    if task_name.startswith("""imagenet1k_""" ):
        config.num_labels = 1000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        config.num_labels = 21000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        config.num_labels = 151
        config.image_size = 512
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_""" ):
        config.num_labels = 21
        config.image_size = 512
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True

    # orig_config: sanity-check the source model and copy hyper-parameters.
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(orig_config , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , """model.classification.activation.name""" , """swish""" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
            config.aspp_dropout_prob = getattr(orig_config , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )

    # id2label: fetch the label map from the hub and attach both directions.
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (the old key is removed).

    Fix: the original declared three parameters with the same mangled name
    (a SyntaxError); the call site below uses ``rename_key(dct, src, dest)``.
    """
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Return ``(old_key, new_key)`` pairs mapping original MobileViTV2
    checkpoint parameter names onto Hugging Face model parameter names.

    :param state_dict: original checkpoint state dict (only keys are read).
    :param base_model: if True, omit the leading ``mobilevitv2.`` prefix
        (used when converting the bare backbone).

    Fix: the function and its locals were name-mangled (``k_new`` was never
    actually bound), breaking every rename below.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # Strip the original top-level "encoder." scope.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        # Generic sub-module normalisation.
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        # Stem.
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        # MobileNet-style stages 1-2.
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        # MobileViT stages 3-5: downsampling + local conv blocks.
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        # MobileViT stages 3-5: transformer blocks; per-stage depths below.
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            # After the loop, j is the last transformer index: global_rep.{j+1}
            # is the stage's closing layernorm module.
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        # Transformer-internal sub-modules.
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        # Heads.
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop auxiliary-head weights in place; the HF model has no aux head.

    Fix: restores the mangled function name (call site uses
    ``remove_unused_keys``) and the ``pop(k, None)`` default.
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO sample image used to sanity-check conversions.

    Fix: restores the mangled function name (``prepare_img`` at the call site)
    and the undefined ``stream=UpperCamelCase`` keyword (intended ``True``).
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original MobileViTV2 checkpoint to the HF format and save it.

    Loads the original weights, filters/renames the state dict, verifies the
    classification logits on a sample image for the base variant, then saves
    model + image processor to ``pytorch_dump_folder_path``.

    Fix: the original signature declared four parameters with one mangled name
    (a SyntaxError) and every intermediate was bound to the same throwaway
    local, so none of the later references resolved.
    """
    config = get_mobilevitva_config(task_name , orig_config_path )

    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )

    # load huggingface model
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )

    # load modified state_dict
    model.load_state_dict(state_dict )

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )

    # verify classification model
    if task_name.startswith("""imagenet""" ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.id2label[predicted_class_idx] )
        if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# CLI entry point: parse arguments and run the conversion.
# Fix: the parser and parsed args were bound to the mangled name ``_A`` while
# later lines referenced ``parser`` / ``args``, which were undefined.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
                Classification (ImageNet-1k)
                    - MobileViTV2 (256x256) : imagenet1k_256
                    - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                    - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
                      imagenet21k_to_1k_256
                    - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                      ImageNet-1k 384x384) : imagenet21k_to_1k_384
                Segmentation
                    - ADE20K Dataset : ade20k_deeplabv3
                    - Pascal VOC 2012 Dataset: voc_deeplabv3
            '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 631 | 0 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
# Module-level logger plus optional torch-XLA imports (used for TPU debug
# metrics in the trainer below).
# NOTE(review): the logger is bound to the mangled name ``_A``; the trainer
# methods below call ``logger.info`` — likely the original name.
_A : Union[str, Any] =logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class _lowercase ( Trainer ):
    """Question-answering Trainer with TensorRT-style quantization support.

    Adds calibration over a dataloader, post-processing-aware evaluate/predict
    (metrics computed outside the inner prediction loop), and ONNX export of
    the quantized model.

    Fixes vs. the mangled original: the base class was ``_A`` (the module
    logger, not a class); every signature declared duplicate parameter names
    (a SyntaxError); ``self.*`` attributes were bound to throwaway locals; all
    methods shared one name so only the last survived; and a stray dataset
    separator (``| 710 |``) was fused onto the final line, breaking parsing.
    Method names are restored to the ``Trainer`` API they override.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
        # NOTE(review): ``self.calib_dataset`` is read by get_calib_dataloader
        # but never initialised here — callers must set it (or pass one in).

    def get_calib_dataloader(self, calib_dataset=None):
        """Build a DataLoader over the calibration dataset."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset, description="""Calibration""" )
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        """Run forward passes in calibration mode to collect quantizer ranges."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)
        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("""***** Running calibration *****""" )
        logger.info(F''' Num examples = {self.calib_num}''' )
        logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            # Stop once enough samples have been seen.
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def lowerCamelCase_ (self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Evaluate with metrics computed via the post-processing function."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="""Evaluation""",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            # Always restore the user's metric function.
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key)
            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    evaluate = lowerCamelCase_

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Predict on a test set, returning a PredictionOutput with metrics."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="""Prediction""",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, """predict""" )
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        """Export the (quantized) model to ONNX using one evaluation batch."""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader) )

        # saving device - to make it consistent
        device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items() )

        logger.info("""Converting model to be onnx compatible""" )
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, """module""" ) else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, """model.onnx""" )
        logger.info(F'''exporting model to {output_model_file}''' )

        axes = {0: """batch_size""", 1: """seq_len"""}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["""input_ids""", """attention_mask""", """token_type_ids"""],
            output_names=["""output_start_logits""", """output_end_logits"""],
            dynamic_axes={
                """input_ids""": axes,
                """attention_mask""": axes,
                """token_type_ids""": axes,
                """output_start_logits""": axes,
                """output_end_logits""": axes,
            },
            verbose=True,
        )
        logger.info("""onnx export finished""" )
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    """ConfigTester specialisation for MobileViTV2.

    Fixes vs. the mangled original: the class inherited the (unrelated)
    previously defined ``_lowercase`` instead of ``ConfigTester``; the class
    name is restored to the one the test suite instantiates; the method is
    renamed to the ``create_and_test_config_common_properties`` hook that
    ``ConfigTester.run_common_tests`` invokes; and the undefined
    ``UpperCamelCase__`` reference is replaced by the freshly built config.
    """

    def create_and_test_config_common_properties(self):
        # Build a config from the tester's stock kwargs and check the
        # MobileViTV2-specific field exists.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """width_multiplier""" ) )
class MobileViTVaModelTester:
    """Builds small MobileViTV2 configs/inputs and checks model outputs.

    Fixes vs. the mangled original: ``__init__`` declared every parameter with
    the same name (a SyntaxError) and bound each value to a throwaway local
    instead of ``self.*``; the class name and method names are restored to
    those the common-test suite below actually calls
    (``MobileViTVaModelTester``, ``prepare_config_and_inputs``, etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Hidden size of the last stage scales with the width multiplier.
        self.last_hidden_size = make_divisible(512 * width_multiplier , divisor=8 )
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) for one run."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        """Build a small MobileViTV2 config from the tester's hyper-parameters."""
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Check the backbone's last hidden state shape."""
        model = MobileViTVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Check the classification head's logits shape."""
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """Check the segmentation head's logits shape, with and without labels."""
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the common-test format."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common-suite + pipeline tests for MobileViTV2 models.

    NOTE(review): names in this file are machine-mangled — the two non-unittest
    bases are whatever ``_lowercase`` resolves to at this point (intended: the
    ModelTesterMixin / PipelineTesterMixin pair imported above), the class
    attributes are all named ``a`` (later assignments shadow earlier ones),
    every method is named ``lowerCamelCase_`` (only the last ``def`` survives),
    and ``UpperCamelCase__`` is an undefined mangled reference throughout.
    """

    # All model classes / pipeline mapping under test (empty without torch).
    a = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    a = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    a = False
    a = False
    a = False
    a = False

    def lowerCamelCase_ ( self: Optional[int] ):
        # setUp: build the model tester and config tester.
        # NOTE(review): binds locals instead of ``self.model_tester`` /
        # ``self.config_tester``; ``UpperCamelCase__`` is undefined.
        lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
        lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        # Shared config sanity checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: int ):
        pass

    @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass

    @unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def lowerCamelCase_ ( self: int ):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass

    def lowerCamelCase_ ( self: Tuple ):
        # Verify the forward signature starts with ``pixel_values``.
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCamelCase__ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        # Backbone forward-shape check via the tester.
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: List[str] ):
        # Hidden-state outputs: 5 stages, spatial dims halved per stage.
        def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs.hidden_states
            lowerCamelCase__ : List[Any] = 5
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCamelCase__ : int = 2
            for i in range(len(UpperCamelCase__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : str = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        # Classification head shape check via the tester.
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Dict ):
        # Segmentation head shape check via the tester.
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )

    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Smoke-test loading a pretrained checkpoint from the hub.
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    """Load the local COCO fixture image used by the integration tests.

    Fix: restores the mangled function name — the integration tests below call
    ``prepare_img()``.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    # Slow integration tests for MobileViTV2 checkpoints (image classification,
    # semantic segmentation, and segmentation post-processing).
    # NOTE(review): obfuscation damage in this class — flagged, not fixed here:
    #   * every method is named `lowerCamelCase_`, so later definitions shadow
    #     earlier ones and the cached property is read back via
    #     `self.default_image_processor`, a name that is never defined;
    #   * `UpperCamelCase__` is read in several scopes where it is unbound —
    #     most occurrences look like `torch_device` or the local
    #     image/inputs/outputs/expected values; confirm against upstream.
    @cached_property
    def lowerCamelCase_ ( self: Tuple ):
        # Image processor for the 1.0-width ImageNet checkpoint, or None when
        # vision dependencies are unavailable so tests can skip.
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
            if is_vision_available()
            else None
        )
    @slow
    def lowerCamelCase_ ( self: Any ):
        # Classification smoke test against pinned reference logits.
        # NOTE(review): `UpperCamelCase__` is undefined in this method.
        lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.default_image_processor
        lowerCamelCase__ : List[Any] = prepare_img()
        lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : int = model(**UpperCamelCase__ )
        # verify the logits
        lowerCamelCase__ : str = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Semantic-segmentation forward pass: (1, 21, 32, 32) logits compared
        # against a pinned 3x3x3 slice.
        # NOTE(review): `UpperCamelCase__` is undefined in this method.
        lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Union[str, Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
        lowerCamelCase__ : str = outputs.logits
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Any = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ] , device=UpperCamelCase__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        # post_process_semantic_segmentation: with target_sizes the map is
        # resized to (50, 60); without, it keeps the logits' (32, 32).
        # NOTE(review): `UpperCamelCase__` is undefined in this method.
        lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
        lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
        lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
        lowerCamelCase__ : int = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        lowerCamelCase__ : int = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 631 | 0 |
def combination_util(arr, n, r, index, data, i):
    """Recursively print every combination of ``r`` elements from ``arr``.

    Args:
        arr: source values.
        n: number of usable elements in ``arr``.
        r: size of each combination.
        index: next free slot in ``data``.
        data: scratch buffer of length ``r`` holding the current partial combination.
        i: index of the next candidate element in ``arr``.

    The original (obfuscated) definition had duplicate parameter names — a
    SyntaxError — and recursed via this name, which is restored here.
    """
    # A full combination sits in data[0:r]; print it.
    if index == r:
        for j in range(r):
            print(data[j] , end=""" """ )
        print(""" """ )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    """Print every combination of ``r`` elements chosen from the first ``n`` of ``arr``.

    Renamed from the obfuscated duplicate ``SCREAMING_SNAKE_CASE_`` (whose
    duplicate parameter names were a SyntaxError) to match its call site.
    """
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
    # Driver code to check the function above
    # (the original bound the list to `_A` but called print_combination(arr, ...)).
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 711 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# These three names are read by the tokenizer class below; the obfuscated
# version bound all of them to `_A`, clobbering each other.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2_048,
}
class _lowercase(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) GPT-NeoX-20B tokenizer backed by `tokenizers`.

    Fixes vs. the obfuscated original: the base class referred to the class's
    own unbound name (now ``PreTrainedTokenizerFast``, imported above), the
    ``__init__`` signature had duplicate parameter names (a SyntaxError), and
    all class attributes were bound to the same name ``a``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its stored add_prefix_space flag
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, eos-terminated per turn, keeping
        only the trailing model_max_length tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of ``nums[left:right + 1]`` by divide and conquer.

    Args:
        nums: non-empty sequence of numbers.
        left: left index of the search range (may be negative).
        right: right index of the search range (may be negative).

    Raises:
        ValueError: if ``nums`` is empty.
        IndexError: if either index is out of range.

    The obfuscated original had duplicate parameter names (a SyntaxError) and
    read the undefined name ``__snake_case``; its recursive calls already used
    ``find_max``, which is restored as the definition name.
    """
    if len(nums) == 0:
        raise ValueError("""find_max() arg is an empty sequence""")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("""list index out of range""")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: heavy (torch-backed) modules are only resolved on
# first attribute access. The obfuscated original bound both the dict and the
# modeling list to `_A` (clobbering the structure) and referenced an
# undefined `_import_structure`; the conventional name is restored here.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: modeling symbols simply are not registered.
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy over _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args(argv=None):
    """Parse command-line arguments for the TPU spawn helper.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            (New, backward-compatible parameter — makes the parser testable.)

    Fixes vs. the obfuscated original: ``type=``/``nargs=`` referenced the
    undefined name ``lowerCamelCase_`` (restored to ``int``/``str``/
    ``REMAINDER``), and the function is named ``parse_args`` as its caller
    expects.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""")
    # positional
    parser.add_argument(
        """training_script""",
        type=str,
        help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ),
    )
    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER)
    return parser.parse_args(argv)
def main():
    """Import the training script as a module and spawn it on the TPU cores.

    Fixes vs. the obfuscated original: the function is named ``main`` as the
    ``__main__`` guard expects; the imported module name is the script stem
    (the original passed an undefined name to import_module); and ``sys.argv``
    is actually reassigned instead of binding a throwaway local.
    """
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the spawned script sees its own arguments plus the
    # TPU core count.
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 713 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Module fixtures/constants. The obfuscated original bound every one of these
# to `_A`, clobbering each other; ORG_NAME is read by the test class below.
SAMPLE_SP = get_tests_dir("""fixtures/test_sentencepiece.model""")

mock_tokenizer_config = {"""target_lang""": """fi""", """source_lang""": """en"""}
zh_code = """>>zh<<"""
ORG_NAME = """Helsinki-NLP/"""

# Tensor framework used for return_tensors in the batching tests.
if is_torch_available():
    FRAMEWORK = """pt"""
elif is_tf_available():
    FRAMEWORK = """tf"""
else:
    FRAMEWORK = """jax"""
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
    # Tokenizer test suite for MarianTokenizer.
    # NOTE(review): obfuscation damage in this class — flagged, not fixed here:
    #   * the first base class is the class's own (unbound) name; upstream this
    #     is presumably a tokenizer-tester mixin — confirm before fixing;
    #   * every method is named `lowerCamelCase_`, so each def shadows the
    #     previous one and unittest cannot collect the individual tests;
    #   * many names (`UpperCamelCase__`, `save_dir`, `tokenizer`, `vocab_keys`,
    #     `en_de_tokenizer`, `batch`, `tok`, `batch_smaller`, `ORG_NAME`) are
    #     read in scopes where they are never bound — see inline notes.
    a = MarianTokenizer  # three class attributes all obfuscated to the name `a`
    a = False
    a = True
    def lowerCamelCase_ ( self: List[str] ):
        # Fixture: write a tiny vocab/config plus sentencepiece models into
        # tmpdirname, then round-trip a tokenizer through save_pretrained.
        super().setUp()
        lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
        # NOTE(review): `UpperCamelCase__` and `save_dir` are undefined below.
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
        # Tokenizer factory reading back the fixture written above.
        return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
        # (input_text, output_text) pair used by round-trip checks.
        return (
            "This is a test",
            "This is a test",
        )
    def lowerCamelCase_ ( self: Optional[Any] ):
        # "</s>" must map to id 0 and back.
        lowerCamelCase__ : Any = """</s>"""
        lowerCamelCase__ : List[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Vocab ordering and size of the 9-token fixture.
        lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(UpperCamelCase__ ) , 9 )
    def lowerCamelCase_ ( self: int ):
        # Vocabulary size matches the 9-token fixture.
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    def lowerCamelCase_ ( self: int ):
        # en→de tokenizer equivalence plus save/reload smoke test.
        lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
        lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
        lowerCamelCase__ : List[str] = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
        self.assertIn("""source.spm""" , UpperCamelCase__ )
        MarianTokenizer.from_pretrained(UpperCamelCase__ )
    def lowerCamelCase_ ( self: Tuple ):
        # Over-long input must truncate to model_max_length (512).
        lowerCamelCase__ : List[Any] = self.get_tokenizer()
        lowerCamelCase__ : Any = tok(
            ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch.input_ids.shape , (2, 512) )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Short inputs pad only to the longest sequence (10), not to 512.
        lowerCamelCase__ : str = self.get_tokenizer()
        lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # Full integration check against a pinned revision of opus-mt-en-de.
        # fmt: off
        lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
        58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    def lowerCamelCase_ ( self: List[str] ):
        # Separate source/target vocab checkpoint: encode fi source, en target,
        # then decode the target ids back to the original text.
        lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        lowerCamelCase__ : str = """Tämä on testi"""
        lowerCamelCase__ : Any = """This is a test"""
        lowerCamelCase__ : int = [76, 7, 2_047, 2]
        lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
        lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 | 0 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _lowercase(unittest.TestCase):
    """Regression test: an optimizer wrapped by Accelerator must stay picklable."""

    def lowerCamelCase_(self: Union[str, Any]):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        # Wrap the optimizer (the original passed an undefined name to prepare
        # and to pickle.dumps).
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''')
        # Reset global accelerate state so other tests start clean.
        AcceleratorState._reset_state()
| 714 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# The obfuscated original bound both the logger and this map to `_A`,
# clobbering the logger.
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class _lowercase(PretrainedConfig):
    """RWKV model configuration.

    Fixes vs. the obfuscated original: the base class referred to the class's
    own unbound name (now ``PretrainedConfig``, imported above); the
    ``__init__`` signature had duplicate parameter names (a SyntaxError) —
    parameter names are reconstructed from the body's attribute assignments;
    and the two class attributes were both bound to ``a``.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Derived defaults: attention width falls back to hidden_size, the
        # feed-forward width to 4 * hidden_size.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 631 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: backend-specific modules (tokenizers / torch / tf /
# flax) register only when their framework is installed. The obfuscated
# original bound every list to `_A` (clobbering the structure) and the final
# _LazyModule call referenced an undefined `_import_structure`; the
# conventional name is restored here.
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]

if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy over _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# The obfuscated original bound both the logger and this map to `_A`.
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class _lowercase(PretrainedConfig):
    """RoCBert configuration (BERT variant with pronunciation/shape embeddings).

    Fixes vs. the obfuscated original: the base class referred to the class's
    own unbound name (now ``PretrainedConfig``, imported above), and the
    ``__init__`` signature had duplicate parameter names (a SyntaxError) —
    names are reconstructed from the body's attribute assignments, with the
    positional default values preserved exactly.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 631 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# The obfuscated original bound both the logger and this map to `_A`.
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class _lowercase(PretrainedConfig):
    """Open-Llama model configuration.

    Fixes vs. the obfuscated original: the base class referred to the class's
    own unbound name (now ``PretrainedConfig``, imported above); the
    ``__init__`` signature had duplicate parameter names (a SyntaxError) —
    reconstructed from the body's attribute assignments; and the undefined
    name ``lowercase_`` in ``kwargs.pop``/``isinstance`` is replaced by the
    intended default value and ``dict``/``float`` checks.
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Honor the historical (misspelled) kwarg name if a caller still passes it.
        self.use_memory_efficient_attention = kwargs.pop(
            """use_memorry_efficient_attention""", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate that rope_scaling is None or a {'type', 'factor'} dict."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 716 |
'''simple docstring'''
import sys
import turtle
def get_mid(p1, p2) -> tuple[float, float]:
    """Return the midpoint of the segment joining points ``p1`` and ``p2``.

    The obfuscated original had duplicate parameter names (a SyntaxError) and
    averaged one point with itself; the Sierpinski recursion below needs the
    midpoint of two distinct vertices.
    """
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth) -> None:
    """Draw one triangle with the module-level ``my_pen`` turtle, then recurse
    into the three corner sub-triangles.

    ``depth`` counts remaining recursion levels; at 0 only the outline is
    drawn. The obfuscated original had duplicate parameter names (a
    SyntaxError) and referenced only one vertex name throughout.
    """
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            """Correct format for using this script: """
            """python fractals.py <int:depth_for_fractal>"""
        )
    # Bind the pen/vertices to the names triangle() actually reads (the
    # original bound both to `_A` and then read undefined names).
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("""red""")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 | 0 |
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
# (The obfuscated version bound every constant to `_A`, leaving the names used
# below — `PATH_TO_TRANSFORMERS`, `transformers`, `CONFIG_MAPPING`,
# `SPECIAL_CASES_TO_ALLOW` — undefined.)
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Maps a config class name to attributes that are allowed to be unused in the
# modeling files, or `True` to allow every attribute of that class.
SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}


# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings) -> bool:
    """Check whether any name in `attributes` is used in one of the modeling sources.

    Args:
        config_class: the configuration class the attribute belongs to (only
            its ``__name__`` is consulted, for the `SPECIAL_CASES_TO_ALLOW` lookup).
        attributes: the attribute name and all of its variant names (from
            ``attribute_map``).
        default_value: the attribute's default in the configuration class.
        source_strings: the source code of all ``modeling_*`` files that live
            next to the configuration class.

    Returns:
        bool: True if the attribute is used somewhere, or explicitly allowed
        to be unused.
    """
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class) -> list:
    """Return the sorted list of `config_class.__init__` arguments that are never
    used in the modeling files that live next to the configuration class.
    """
    # Get the arguments of `__init__`, minus `self` and the `**kwargs` catch-all.
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes() -> None:
    """Check every configuration class in `CONFIG_MAPPING` for unused `__init__`
    arguments and raise a `ValueError` listing the offenders, if any.
    """
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 717 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Build a small ConvBERT config plus dummy inputs and run shape checks for
    each TF ConvBERT head.

    Renamed from the obfuscated `_lowercase`: the test class below instantiates
    `TFConvBertModelTester(self)`, and every `self.x = ...` / local assignment
    target (destroyed into `lowerCamelCase__` by the obfuscator) is restored
    from the names the rest of the class reads.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE: the original constructor deliberately ignores its arguments and
        # pins fixed hyper-parameters (e.g. hidden_size=384); kept as-is.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels and a small `ConvBertConfig`."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        # Exercise both input styles: keyword dict and positional list.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Tile each input along a new "choices" axis: (batch, num_choices, seq).
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
# NOTE(review): this class is badly damaged by the identifier obfuscation and
# cannot run as-is:
#   * the base list repeats `_lowercase` twice -> "duplicate base class" TypeError;
#     it should presumably be (TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
#     given the imports at the top of the file — confirm against upstream.
#   * all five class attributes are bound to the same name `a`, so each rebinding
#     clobbers the previous one; the mixin expects names like `all_model_classes`
#     and `pipeline_model_mapping` — confirm.
#   * every method is named `lowerCamelCase_`, so only the last definition survives;
#     the originals were unittest `test_*` methods plus `setUp`.
#   * statements of the form `x , y : T = ...` (annotated tuple assignment) are a
#     SyntaxError in Python.
# Code is left byte-identical; only comments were added.
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    # Intended: `all_model_classes` — the TF ConvBERT model classes under test.
    a = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Intended: `pipeline_model_mapping` — pipeline task name -> model class.
    a = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Intended: three boolean feature flags (presumably test_pruning,
    # test_head_masking, test_onnx — TODO confirm); all collapse into `a`.
    a = False
    a = False
    a = False
    # Intended: `setUp` — builds the model tester and a config tester.
    # `TFConvBertModelTester` is not defined in this file as written (the helper
    # class above was renamed to `_lowercase` by the obfuscator).
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : Dict = TFConvBertModelTester(self )
        lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
    # Intended: `test_config`.
    def lowerCamelCase_ ( self: List[str] ):
        self.config_tester.run_common_tests()
    # Intended: `test_model`.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    # Intended: `test_for_masked_lm`.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
    # Intended: `test_for_multiple_choice`.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
    # Intended: `test_for_question_answering`.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
    # Intended: `test_for_sequence_classification`.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
    # Intended: `test_for_token_classification`.
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
    # Intended: a slow saved-model round-trip test — saves the model in TF
    # SavedModel format, reloads it with Keras, and checks hidden-state /
    # attention output shapes. NOTE(review): `x , y : T = ...` below is a
    # SyntaxError (annotated assignment cannot have a tuple target).
    @slow
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : Tuple = True
        if hasattr(UpperCamelCase__ , """use_cache""" ):
            lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
                lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" )
                lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ )
                lowerCamelCase__ : Any = model(UpperCamelCase__ )
                if self.is_encoder_decoder:
                    lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""]
                    lowerCamelCase__ : Any = outputs["""encoder_attentions"""]
                else:
                    lowerCamelCase__ : int = outputs["""hidden_states"""]
                    lowerCamelCase__ : Optional[int] = outputs["""attentions"""]
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                lowerCamelCase__ : Union[str, Any] = getattr(
                    self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    # Intended: `test_model_from_pretrained` — smoke-loads the public checkpoint.
    @slow
    def lowerCamelCase_ ( self: int ):
        lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(UpperCamelCase__ )
    # Intended: `test_attention_outputs` — verifies attention tensors are emitted
    # with the expected shapes whether requested via call kwargs or config.
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ):
            lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
            self.assertEqual(out_len % 2 , 0 )
            lowerCamelCase__ : Any = outputs.decoder_attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(UpperCamelCase__: List[str] ):
            lowerCamelCase__ : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            lowerCamelCase__ : Any = False
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            if self.is_encoder_decoder:
                lowerCamelCase__ : str = model_class(UpperCamelCase__ )
                lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
                check_decoder_attentions_output(UpperCamelCase__ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ : Optional[int] = True
            lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            # Check attention is always last and order is fine
            lowerCamelCase__ : List[Any] = True
            lowerCamelCase__ : int = True
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
            self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the public `YituTech/conv-bert-base` checkpoint.

    Renamed from `_lowercase`, which shadowed the main test class of the same
    obfuscated name; local variables (collapsed into `lowerCamelCase__`) are
    restored from their usages.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Reference values for the first 3 tokens x first 3 hidden dims.
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 631 | 0 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_A : List[Any] =logging.getLogger(__name__)
# NOTE(review): this function (presumably accelerate's `load_and_quantize_model`
# — confirm against upstream) is unrunnable as written:
#   * the signature declares `UpperCamelCase` eight times — duplicate argument
#     names are a SyntaxError in Python;
#   * the 4-bit/8-bit variants were collapsed into identical names
#     (`load_in_abit` assigned twice at the top, `is_abit_bnb_available`
#     imported twice above), so the two quantization modes can no longer be
#     distinguished;
#   * the body still reads `device_map`, `weights_location` and
#     `offload_state_dict`, which must have been parameter names before
#     obfuscation.
# Repair requires the real names from the accelerate module; code is left
# byte-identical and only comments were added.
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = False , ) -> List[Any]:
    """Quantize a model with bitsandbytes (4-bit or 8-bit) and dispatch it onto devices."""
    lowerCamelCase__ : str = bnb_quantization_config.load_in_abit
    lowerCamelCase__ : Tuple = bnb_quantization_config.load_in_abit
    # Validate that the installed bitsandbytes supports the requested mode.
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
            """make sure you have the latest version of `bitsandbytes` installed.""" )
    lowerCamelCase__ : Union[str, Any] = []
    # custom device map
    if isinstance(UpperCamelCase , UpperCamelCase ) and len(device_map.keys() ) > 1:
        lowerCamelCase__ : Any = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        lowerCamelCase__ : Union[str, Any] = get_keys_to_not_convert(UpperCamelCase )
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_abit:
            bnb_quantization_config.skip_modules.extend(UpperCamelCase )
    lowerCamelCase__ : Union[str, Any] = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        lowerCamelCase__ : str = []
    lowerCamelCase__ : List[Any] = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(UpperCamelCase )
    # compatibility with peft
    lowerCamelCase__ : Tuple = load_in_abit
    lowerCamelCase__ : Dict = load_in_abit
    lowerCamelCase__ : Optional[Any] = get_parameter_device(UpperCamelCase )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            """It is not recommended to quantize a loaded model. """
            """The model should be instantiated under the `init_empty_weights` context manager.""" )
        lowerCamelCase__ : List[Any] = replace_with_bnb_layers(UpperCamelCase , UpperCamelCase , modules_to_not_convert=UpperCamelCase )
        # convert param to the right dtype
        lowerCamelCase__ : Tuple = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    lowerCamelCase__ : Tuple = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
                    lowerCamelCase__ : Tuple = getattr(UpperCamelCase , UpperCamelCase , UpperCamelCase )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(UpperCamelCase ):
                param.to(UpperCamelCase )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info(
            f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
            """We move the model to cuda.""" )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
    else:
        # meta-device path: build the bnb-layer skeleton, infer a device map,
        # then load the checkpoint shards directly into the quantized modules.
        with init_empty_weights():
            lowerCamelCase__ : Optional[int] = replace_with_bnb_layers(
                UpperCamelCase , UpperCamelCase , modules_to_not_convert=UpperCamelCase )
        lowerCamelCase__ : Any = get_quantized_model_device_map(
            UpperCamelCase , UpperCamelCase , UpperCamelCase , max_memory=UpperCamelCase , no_split_module_classes=UpperCamelCase , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            lowerCamelCase__ : Any = True
        lowerCamelCase__ : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
        load_checkpoint_in_model(
            UpperCamelCase , UpperCamelCase , UpperCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=UpperCamelCase , offload_state_dict=UpperCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
        return dispatch_model(UpperCamelCase , device_map=UpperCamelCase , offload_dir=UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None ) -> Union[str, Any]:
    # Build (or validate) an accelerate `device_map` for dispatching a
    # bitsandbytes-quantized model, forbidding cpu/disk placement of 8-bit modules.
    # NOTE(review): unrunnable as written — every parameter is named
    # `UpperCamelCase` (duplicate argument names are a SyntaxError), and the body
    # reads names that are never bound here (`device_map`, `special_dtypes`,
    # `max_memory`, `model`, `bnb_quantization_config`, `no_split_module_classes`,
    # `kwargs_dict`, ...): the original parameter/local names were destroyed by the
    # renaming and must be restored before use.
    if device_map is None:
        if torch.cuda.is_available():
            # default: place the whole model on the current CUDA device
            lowerCamelCase__ : Optional[int] = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info("""The device_map was not initialized.""" """Setting device_map to `{\'\':torch.cuda.current_device()}`.""" )
    if isinstance(UpperCamelCase , UpperCamelCase ):
        # string strategies are resolved via accelerate's auto device-map inference
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                """If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or """
                """\'sequential\'.""" )
        lowerCamelCase__ : Optional[Any] = {}
        # modules in `skip_modules` keep the configured torch dtype ...
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        # ... and `keep_in_fpaa_modules` stay in full precision
        # (`floataa` is presumably a mangled `float32` — TODO confirm)
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )
        lowerCamelCase__ : List[Any] = {}
        lowerCamelCase__ : Dict = special_dtypes
        lowerCamelCase__ : Optional[Any] = no_split_module_classes
        lowerCamelCase__ : Tuple = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            lowerCamelCase__ : Optional[int] = get_balanced_memory(
                UpperCamelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=UpperCamelCase , **UpperCamelCase , )
        lowerCamelCase__ : List[Any] = max_memory
        lowerCamelCase__ : Tuple = infer_auto_device_map(UpperCamelCase , **UpperCamelCase )
    if isinstance(UpperCamelCase , UpperCamelCase ):
        # check if don't have any quantized module on the cpu
        lowerCamelCase__ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        lowerCamelCase__ : Union[str, Any] = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    # quantized weights cannot be offloaded to cpu/disk — hard error
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """ )
                else:
                    logger.info(
                        """Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
        del device_map_without_some_modules
    return device_map
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None ) -> Union[str, Any]:
    # Replace the model's linear layers with bitsandbytes quantized layers and
    # warn if nothing could be replaced.
    # NOTE(review): unrunnable as written — duplicate `UpperCamelCase` parameter
    # names are a SyntaxError, and `modules_to_not_convert` / `has_been_replaced`
    # / `model` are read below but never bound (their assignments were renamed to
    # `lowerCamelCase__`). Restore the original names before use.
    if modules_to_not_convert is None:
        lowerCamelCase__ : Optional[int] = []
    lowerCamelCase__ : List[Any] = _replace_with_bnb_layers(
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    if not has_been_replaced:
        logger.warning(
            """You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
            """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
            """ Please double check your model architecture, or submit an issue on github if you think this is"""
            """ a bug.""" )
    return model
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , ) -> Optional[Any]:
    # Recursively swap `nn.Linear` children for bnb Linear8bitLt/Linear4bit
    # modules, returning (model, has_been_replaced).
    # NOTE(review): unrunnable as written — duplicate `UpperCamelCase` parameter
    # names are a SyntaxError; `model`, `current_key_name`,
    # `modules_to_not_convert`, `current_key_name_str`, `proceed`, `bnb_module`,
    # `has_been_replaced` are read but never bound (their stores collapsed into
    # `lowerCamelCase__`); `load_in_abit` appears twice where the original
    # distinguished 8-bit vs 4-bit. Restore original names before use.
    lowerCamelCase__ : Optional[Any] = False
    for name, module in model.named_children():
        if current_key_name is None:
            lowerCamelCase__ : List[str] = []
        current_key_name.append(UpperCamelCase )
        if isinstance(UpperCamelCase , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            lowerCamelCase__ : Dict = '''.'''.join(UpperCamelCase )
            lowerCamelCase__ : List[Any] = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    lowerCamelCase__ : Union[str, Any] = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    lowerCamelCase__ : int = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=UpperCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    lowerCamelCase__ : int = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError("""load_in_8bit and load_in_4bit can\'t be both False""" )
                lowerCamelCase__ : Union[str, Any] = module.weight.data
                if module.bias is not None:
                    lowerCamelCase__ : Dict = module.bias.data
                bnb_module.requires_grad_(UpperCamelCase )
                setattr(UpperCamelCase , UpperCamelCase , UpperCamelCase )
                lowerCamelCase__ : Union[str, Any] = True
        if len(list(module.children() ) ) > 0:
            # recurse into containers; OR child results into the running flag
            lowerCamelCase__ : Optional[Any] = _replace_with_bnb_layers(
                UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
            lowerCamelCase__ : Tuple = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Union[str, Any]:
    # Compute the module names that should NOT be quantized: the output head and
    # any modules tied to it (weights are stripped of ".weight"/".bias" suffixes).
    # NOTE(review): unrunnable as written — `tied_params`, `model`,
    # `has_tied_params`, `is_base_model`, `list_modules`, `list_untouched`,
    # `names_to_remove`, `filtered_module_names` are read but never bound (their
    # stores collapsed into `lowerCamelCase__`), and the two `set(UpperCamelCase)`
    # operands at the difference below were originally two *different* name lists.
    with init_empty_weights():
        lowerCamelCase__ : int = deepcopy(UpperCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
        lowerCamelCase__ : int = find_tied_parameters(UpperCamelCase )
    # For compatibility with Accelerate < 0.18
    if isinstance(UpperCamelCase , UpperCamelCase ):
        lowerCamelCase__ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        lowerCamelCase__ : Any = sum(UpperCamelCase , [] )
    lowerCamelCase__ : Optional[int] = len(UpperCamelCase ) > 0
    # Check if it is a base model
    lowerCamelCase__ : Any = False
    if hasattr(UpperCamelCase , """base_model_prefix""" ):
        lowerCamelCase__ : List[str] = not hasattr(UpperCamelCase , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    lowerCamelCase__ : Optional[Any] = list(model.named_children() )
    lowerCamelCase__ : Any = [list_modules[-1][0]]
    # add last module together with tied weights
    lowerCamelCase__ : Any = set(UpperCamelCase ) - set(UpperCamelCase )
    lowerCamelCase__ : Union[str, Any] = list(set(UpperCamelCase ) ) + list(UpperCamelCase )
    # remove ".weight" from the keys
    lowerCamelCase__ : List[str] = ['''.weight''', '''.bias''']
    lowerCamelCase__ : Dict = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                lowerCamelCase__ : int = name.replace(UpperCamelCase , """""" )
        filtered_module_names.append(UpperCamelCase )
    return filtered_module_names
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int:
    """Return True if any submodule of the given model is a bnb 4-bit linear layer.

    Bug fixes: the loop iterated ``model.modules()`` although ``model`` was an
    undefined name (the model is the ``UpperCamelCase`` parameter), and the
    ``isinstance`` check tested the model itself instead of the loop variable
    ``m``, which made the loop pointless.
    """
    for m in UpperCamelCase.modules():
        # test each submodule, not the root model, against the bnb 4-bit type
        if isinstance(m , bnb.nn.Linearabit ):
            return True
    return False
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any:
return next(parameter.parameters() ).device
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
    # Quantize one named parameter on GPU, offload its (quantized) weight and
    # SCB statistics to disk, then park the slot on the "meta" device.
    # NOTE(review): unrunnable as written — all seven parameters share the name
    # `UpperCamelCase` (SyntaxError), and `fpaa_statistics`, `param_name`,
    # `model`, `tensor_name`, `splits`, `new_module`, `module`, `param` are read
    # but never bound (their stores collapsed into `lowerCamelCase__`). Restore
    # the original names before use.
    if fpaa_statistics is None:
        set_module_tensor_to_device(UpperCamelCase , UpperCamelCase , 0 , dtype=UpperCamelCase , value=UpperCamelCase )
    lowerCamelCase__ : Optional[Any] = param_name
    lowerCamelCase__ : int = model
    if "." in tensor_name:
        # walk dotted module path down to the owning submodule
        lowerCamelCase__ : List[Any] = tensor_name.split(""".""" )
        for split in splits[:-1]:
            lowerCamelCase__ : str = getattr(UpperCamelCase , UpperCamelCase )
            if new_module is None:
                raise ValueError(f'''{module} has no attribute {split}.''' )
            lowerCamelCase__ : Dict = new_module
        lowerCamelCase__ : Optional[Any] = splits[-1]
    # offload weights
    lowerCamelCase__ : Any = False
    offload_weight(module._parameters[tensor_name] , UpperCamelCase , UpperCamelCase , index=UpperCamelCase )
    if hasattr(module._parameters[tensor_name] , """SCB""" ):
        # 8-bit weights carry their SCB quantization statistics alongside
        offload_weight(
            module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , UpperCamelCase , index=UpperCamelCase , )
    else:
        offload_weight(UpperCamelCase , UpperCamelCase , UpperCamelCase , index=UpperCamelCase )
        offload_weight(UpperCamelCase , param_name.replace("""weight""" , """SCB""" ) , UpperCamelCase , index=UpperCamelCase )
    set_module_tensor_to_device(UpperCamelCase , UpperCamelCase , """meta""" , dtype=UpperCamelCase , value=torch.empty(*param.size() ) )
| 718 |
'''simple docstring'''
_A : List[str] ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 631 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : int =logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class _lowercase ( SCREAMING_SNAKE_CASE__ ):
    # Zero-shot image classification pipeline: scores an image against free-form
    # candidate labels with a CLIP-style image/text model.
    # NOTE(review): this class is not runnable as written —
    #   * the decorator argument / base class `SCREAMING_SNAKE_CASE__` is
    #     undefined (presumably the `Pipeline` base imported above — confirm);
    #   * `__call__` and the preprocess method declare duplicate
    #     `UpperCamelCase__` parameter names (SyntaxError);
    #   * all four helper methods were collapsed to the same name
    #     `lowerCamelCase_` (originally the distinct `_sanitize_parameters`,
    #     `preprocess`, `_forward`, `postprocess` hooks), so only the last
    #     definition survives;
    #   * method bodies read unbound names (`snake_case__`, `kwargs`,
    #     `candidate_labels`, `hypothesis_template`, `image`, `inputs`,
    #     `text_inputs`, `scores`, `result`, ...), and the `sorted` key lambda
    #     takes `UpperCamelCase__` but uses `x` (NameError at runtime).
    # The original parameter/local names must be restored before use.
    def __init__( self: List[str] , **UpperCamelCase__: List[Any] ):
        super().__init__(**snake_case__ )
        requires_backends(self , """vision""" )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self: List[Any] , UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: Union[str, Any] ):
        return super().__call__(snake_case__ , **snake_case__ )
    def lowerCamelCase_ ( self: Union[str, Any] , **UpperCamelCase__: Any ):
        # split pipeline kwargs into (preprocess, forward, postprocess) params
        lowerCamelCase__ : Optional[int] = {}
        if "candidate_labels" in kwargs:
            lowerCamelCase__ : Optional[Any] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            lowerCamelCase__ : Tuple = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: List[str]=None , UpperCamelCase__: Any="This is a photo of {}." ):
        # preprocess: load the image and tokenize one hypothesis per label
        lowerCamelCase__ : Tuple = load_image(snake_case__ )
        lowerCamelCase__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
        lowerCamelCase__ : Any = candidate_labels
        lowerCamelCase__ : Optional[Any] = [hypothesis_template.format(snake_case__ ) for x in candidate_labels]
        lowerCamelCase__ : Tuple = self.tokenizer(snake_case__ , return_tensors=self.framework , padding=snake_case__ )
        lowerCamelCase__ : int = [text_inputs]
        return inputs
    def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] ):
        # forward: run the model on the image plus the per-label text inputs
        lowerCamelCase__ : Dict = model_inputs.pop("""candidate_labels""" )
        lowerCamelCase__ : Optional[int] = model_inputs.pop("""text_inputs""" )
        if isinstance(text_inputs[0] , snake_case__ ):
            lowerCamelCase__ : List[Any] = text_inputs[0]
        else:
            # Batching case.
            lowerCamelCase__ : Dict = text_inputs[0][0]
        lowerCamelCase__ : List[Any] = self.model(**snake_case__ , **snake_case__ )
        lowerCamelCase__ : List[Any] = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] ):
        # postprocess: softmax over labels, return sorted {score, label} dicts
        lowerCamelCase__ : List[Any] = model_outputs.pop("""candidate_labels""" )
        lowerCamelCase__ : Optional[int] = model_outputs["logits"][0]
        if self.framework == "pt":
            lowerCamelCase__ : Union[str, Any] = logits.softmax(dim=-1 ).squeeze(-1 )
            lowerCamelCase__ : List[str] = probs.tolist()
            if not isinstance(snake_case__ , snake_case__ ):
                lowerCamelCase__ : Optional[Any] = [scores]
        elif self.framework == "tf":
            lowerCamelCase__ : Any = stable_softmax(snake_case__ , axis=-1 )
            lowerCamelCase__ : str = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        lowerCamelCase__ : List[Any] = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(snake_case__ , snake_case__ ) , key=lambda UpperCamelCase__ : -x[0] )
        ]
        return result
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any =logging.get_logger(__name__)
_A : Dict ={
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowercase ( PretrainedConfig ):
    """Configuration class for the TrOCR text decoder.

    Fixes in this revision:
    - the class inherited from itself (``class _lowercase ( _lowercase )``); the
      intended base is ``PretrainedConfig``, imported at the top of this file
      and otherwise unused;
    - the three class attributes were all named ``a`` so only the last one
      survived; they are restored to the standard ``PretrainedConfig`` names;
    - ``__init__`` declared every parameter as ``UpperCamelCase__`` (duplicate
      argument names are a SyntaxError); the parameter names are restored from
      the names the body already referenced, in the original positional order.
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self ,
        vocab_size=50_265 ,
        d_model=1_024 ,
        decoder_layers=12 ,
        decoder_attention_heads=16 ,
        decoder_ffn_dim=4_096 ,
        activation_function="gelu" ,
        max_position_embeddings=512 ,
        dropout=0.1 ,
        attention_dropout=0.0 ,
        activation_dropout=0.0 ,
        decoder_start_token_id=2 ,
        init_std=0.02 ,
        decoder_layerdrop=0.0 ,
        use_cache=True ,
        scale_embedding=False ,
        use_learned_position_embeddings=True ,
        layernorm_embedding=True ,
        pad_token_id=1 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        **kwargs ,
    ):
        # store decoder hyper-parameters in the same order the original body did
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id ,
            bos_token_id=bos_token_id ,
            eos_token_id=eos_token_id ,
            decoder_start_token_id=decoder_start_token_id ,
            **kwargs ,
        )
| 631 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
_A : str =logging.getLogger(__name__)
class _lowercase ( __lowerCamelCase ):
    # Distributed (torch.distributed) RAG retriever: rank 0 hosts the index and
    # serves retrieval queries gathered from the workers over a dedicated gloo
    # process group.
    # NOTE(review): unrunnable as written — the base class `__lowerCamelCase` is
    # undefined (the intended base, `RagRetriever`, is imported above and never
    # otherwise used); `__init__` and two other methods declare duplicate
    # `UpperCamelCase__` parameter names (SyntaxError); method bodies pass the
    # undefined placeholder `SCREAMING_SNAKE_CASE_` as arguments and read names
    # (`distributed_port`, `addrs`, `ifname`, `target_tensor`, `world_size`,
    # `n_queries`, `n_docs`, `doc_ids`, `retrieved_doc_embeds`, ...) whose
    # stores collapsed into `lowerCamelCase__`; `torch.floataa`/`torch.intaa`
    # are mangled dtypes (presumably float32/int64 — confirm). Restore the
    # original names before use.
    def __init__( self: List[str] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple=None ):
        super().__init__(
            SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=SCREAMING_SNAKE_CASE_ , generator_tokenizer=SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , init_retrieval=SCREAMING_SNAKE_CASE_ , )
        lowerCamelCase__ : int = None
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Tuple ):
        # create the retrieval process group and load the index on the main rank
        logger.info("""initializing retrieval""" )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""" )
            # needs to be set manually
            lowerCamelCase__ : Union[str, Any] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            lowerCamelCase__ : Tuple = str(distributed_port + 1 )
            lowerCamelCase__ : List[Any] = dist.new_group(ranks=SCREAMING_SNAKE_CASE_ , backend="""gloo""" )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""" )
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )
    def lowerCamelCase_ ( self: int ):
        # True on the main (rank 0) retrieval worker
        return dist.get_rank(group=self.process_group ) == 0
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any=torch.floataa ):
        # receive one scattered tensor slice from rank 0
        lowerCamelCase__ : List[Any] = torch.empty(SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
        dist.scatter(SCREAMING_SNAKE_CASE_ , src=0 , scatter_list=SCREAMING_SNAKE_CASE_ , group=self.process_group )
        return target_tensor
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : Dict = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        lowerCamelCase__ : Optional[Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , SCREAMING_SNAKE_CASE_ )
        return ifname
    def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] ):
        # gather question states on rank 0, retrieve there, scatter results back
        # single GPU training
        if not dist.is_initialized():
            lowerCamelCase__ , lowerCamelCase__ : str = self._main_retrieve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ )
        # distributed training
        lowerCamelCase__ : Optional[int] = dist.get_world_size(group=self.process_group )
        # gather logic
        lowerCamelCase__ : Optional[Any] = None
        if self._is_main():
            lowerCamelCase__ : Any = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(SCREAMING_SNAKE_CASE_ )]
        dist.gather(torch.tensor(SCREAMING_SNAKE_CASE_ ) , dst=0 , gather_list=SCREAMING_SNAKE_CASE_ , group=self.process_group )
        # scatter logic
        lowerCamelCase__ : Union[str, Any] = question_hidden_states.shape[0]
        lowerCamelCase__ : Optional[Any] = []
        lowerCamelCase__ : Union[str, Any] = []
        if self._is_main():
            assert len(SCREAMING_SNAKE_CASE_ ) == world_size
            lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self._main_retrieve(torch.cat(SCREAMING_SNAKE_CASE_ ).numpy() , SCREAMING_SNAKE_CASE_ )
            lowerCamelCase__ , lowerCamelCase__ : Dict = torch.tensor(SCREAMING_SNAKE_CASE_ ), torch.tensor(SCREAMING_SNAKE_CASE_ )
            lowerCamelCase__ : Union[str, Any] = self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            lowerCamelCase__ : Any = self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        lowerCamelCase__ : Tuple = self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs] , target_type=torch.intaa )
        lowerCamelCase__ : str = self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ )
| 720 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> bool:
    """Return True iff the graph (adjacency lists keyed 0..n-1) is bipartite.

    Uses iteratively-started DFS 2-coloring, then verifies no edge joins two
    same-colored vertices.

    Bug fixes: the original lost the subscripted stores (``visited[v] = True``
    and ``color[v] = c`` had become bare assignments to a throwaway local) and
    passed the graph itself instead of the vertex to the recursive and
    top-level ``dfs`` calls, so the function could never run.
    """
    visited = [False] * len(UpperCamelCase )
    color = [-1] * len(UpperCamelCase )

    def dfs(v , c ):
        # color vertex v with c, then color unvisited neighbours with 1 - c
        visited[v] = True
        color[v] = c
        for u in UpperCamelCase[v]:
            if not visited[u]:
                dfs(u , 1 - c )

    # color every connected component (the graph may be disconnected)
    for i in range(len(UpperCamelCase ) ):
        if not visited[i]:
            dfs(i , 0 )
    # bipartite iff no edge connects two vertices of the same color
    for i in range(len(UpperCamelCase ) ):
        for j in UpperCamelCase[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
_A : int ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
# Bug fix: the original line called `check_bipartite_dfs(graph)`, but both
# names had been renamed (the function to `SCREAMING_SNAKE_CASE_`, the graph to
# `_A`), so it raised NameError. Call the names actually defined in this file.
print(SCREAMING_SNAKE_CASE_(_A))
| 631 | 0 |
'''simple docstring'''
# Lazy-import package init for the ConvNext model family: under TYPE_CHECKING
# the real symbols are imported eagerly; at runtime the module is replaced by an
# accelerate-style `_LazyModule` built from an import-structure mapping.
# NOTE(review): unrunnable as written — the final line passes
# `_import_structure`, which is undefined (the dict was renamed to `_A`), and
# the later `_A = [...]` assignments *overwrite* the name instead of adding
# keys to the import-structure dict (only the last list survives). Restore the
# `_import_structure[...] = [...]` key assignments before use.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)
_A : List[str] ={
    """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
# vision-only symbols (feature extractor / image processor)
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Dict =["""ConvNextFeatureExtractor"""]
    _A : Optional[Any] =["""ConvNextImageProcessor"""]
# PyTorch modeling symbols
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : str =[
        """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvNextForImageClassification""",
        """ConvNextModel""",
        """ConvNextPreTrainedModel""",
        """ConvNextBackbone""",
    ]
# TensorFlow modeling symbols
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Optional[int] =[
        """TFConvNextForImageClassification""",
        """TFConvNextModel""",
        """TFConvNextPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # eager imports for static type checkers only
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys
    # replace this module with a lazy loader at runtime
    _A : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 721 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase ( Dataset ):
    """Map-style dataset that applies a preprocessing callable to each element.

    Fixes in this revision:
    - the class inherited from itself (``class _lowercase ( _lowercase )``);
      the intended base is ``torch.utils.data.Dataset``, imported at the top of
      this file;
    - ``__init__`` declared all three parameters as ``UpperCamelCase__``
      (duplicate argument names are a SyntaxError) and ``__getitem__``'s body
      read an undefined ``i``; names are restored from the attributes/locals
      the methods actually use.
    """

    def __init__( self , dataset , process , params ):
        self.dataset = dataset  # underlying indexable dataset
        self.process = process  # callable applied to each raw item
        self.params = params    # extra keyword arguments forwarded to `process`
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed


# public alias matching the upstream name (presumably `PipelineDataset` — the
# class name itself was mangled to `_lowercase`)
PipelineDataset = _lowercase
class _lowercase ( _lowercase ):
    # Iterator over a DataLoader that applies `infer` to each batch and can
    # unroll loader batches into batch_size=1 items for downstream pipeline
    # stages.
    # NOTE(review): unrunnable as written —
    #   * the base here is the previously defined class; upstream this iterator
    #     presumably derives from `IterableDataset` (imported above) — confirm;
    #   * `__init__` declares all four parameters as `UpperCamelCase__`
    #     (duplicate argument names are a SyntaxError) and its body reads
    #     `loader`/`infer`/`params`/`loader_batch_size`, which are unbound;
    #   * `__iter__` assigns `iter(self.loader)` to a throwaway local instead
    #     of the `self.iterator` attribute read below;
    #   * the batch-unrolling method reads `UpperCamelCase__` although it takes
    #     only `self` (NameError), and many stores (`result`, `processed`,
    #     `key`, `first_tensor`, `observed_batch_size`, `self._loader_batch_*`)
    #     collapsed into `lowerCamelCase__` locals.
    # Restore the original names before use.
    def __init__( self: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Any=None ):
        lowerCamelCase__ : int = loader
        lowerCamelCase__ : str = infer
        lowerCamelCase__ : Optional[int] = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            lowerCamelCase__ : Optional[int] = None
        lowerCamelCase__ : int = loader_batch_size
        # Internal bookkeeping
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[Any] = None
    def __len__( self: Dict ):
        return len(self.loader )
    def __iter__( self: Optional[int] ):
        lowerCamelCase__ : List[Any] = iter(self.loader )
        return self
    def lowerCamelCase_ ( self: Any ):
        # slice one batch_size=1 item out of the currently unrolled loader batch
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            lowerCamelCase__ : str = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            lowerCamelCase__ : int = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Convert ModelOutput to tuple first
                    lowerCamelCase__ : str = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase__ : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        lowerCamelCase__ : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        lowerCamelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    lowerCamelCase__ : List[str] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase__ : Optional[Any] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    lowerCamelCase__ : int = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    lowerCamelCase__ : str = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            lowerCamelCase__ : Optional[int] = self._loader_batch_data.__class__(UpperCamelCase__ )
        self._loader_batch_index += 1
        return result
    def lowerCamelCase_ ( self: List[Any] ):
        # advance: either keep unrolling the current batch, or pull the next
        # loader batch, run `infer`, and (maybe) start unrolling it
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        lowerCamelCase__ : Optional[Any] = next(self.iterator )
        lowerCamelCase__ : List[str] = self.infer(UpperCamelCase__ , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(UpperCamelCase__ , torch.Tensor ):
                lowerCamelCase__ : Optional[Any] = processed
            else:
                lowerCamelCase__ : Union[str, Any] = list(processed.keys() )[0]
                lowerCamelCase__ : Any = processed[key]
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                lowerCamelCase__ : Any = len(UpperCamelCase__ )
            else:
                lowerCamelCase__ : List[str] = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                lowerCamelCase__ : Union[str, Any] = observed_batch_size
            # Setting internal index to unwrap the batch
            lowerCamelCase__ : List[Any] = processed
            lowerCamelCase__ : List[Any] = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class _lowercase ( _lowercase ):
    """Iterator that flattens lazy per-item sub-iterators produced by ``infer``.

    Each element of the loader yields a sub-iterator (via ``infer``); this
    class chains them so callers see one flat stream — effectively flattening
    lists of lists with generators.

    Fixes in this revision:
    - ``__init__`` declared all four parameters as ``UpperCamelCase__``
      (duplicate argument names are a SyntaxError); names restored from the
      ``super().__init__`` contract of the parent iterator;
    - ``__iter__`` assigned its results to throwaway locals instead of the
      ``self.iterator`` / ``self.subiterator`` attributes that the advance
      method below reads, so iteration could never start.
    """

    def __init__( self , loader , infer , params , loader_batch_size=None ):
        super().__init__(loader , infer , params )
    def __iter__( self ):
        self.iterator = iter(self.loader )
        # no sub-iterator until the first element is requested
        self.subiterator = None
        return self
    def lowerCamelCase_ ( self ):
        if self.subiterator is None:
            # first call: build the sub-iterator from the first loader item
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, move on to the next loader item
            # and start consuming its sub-iterator.
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
class _lowercase ( _lowercase ):
    # Regroups a flattened item stream back into `process` boundaries using the
    # `is_last` marker carried by each item.
    # NOTE(review): unrunnable as written — `__iter__` assigns `iter(self.loader)`
    # to a throwaway local instead of `self.iterator`; the advance method reads
    # `is_last`, `accumulator`, `item`, `processed`, `key`, `first_tensor`,
    # `observed_batch_size`, `UpperCamelCase__` and the `self._loader_batch_*`
    # attributes, but their stores all collapsed into `lowerCamelCase__` locals.
    # Restore the original names before use.
    def __iter__( self: List[Any] ):
        lowerCamelCase__ : int = iter(self.loader )
        return self
    def lowerCamelCase_ ( self: Tuple ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        lowerCamelCase__ : List[str] = False
        lowerCamelCase__ : Union[str, Any] = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                lowerCamelCase__ : Any = self.loader_batch_item()
                lowerCamelCase__ : Tuple = item.pop("""is_last""" )
                accumulator.append(UpperCamelCase__ )
                if is_last:
                    return accumulator
        while not is_last:
            lowerCamelCase__ : str = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                # infer the effective batch size from the first tensor found
                if isinstance(UpperCamelCase__ , torch.Tensor ):
                    lowerCamelCase__ : Dict = processed
                else:
                    lowerCamelCase__ : Dict = list(processed.keys() )[0]
                    lowerCamelCase__ : Dict = processed[key]
                if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
                else:
                    lowerCamelCase__ : Dict = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    lowerCamelCase__ : str = observed_batch_size
                lowerCamelCase__ : str = processed
                lowerCamelCase__ : Optional[int] = 0
                while self._loader_batch_index < self.loader_batch_size:
                    lowerCamelCase__ : List[Any] = self.loader_batch_item()
                    lowerCamelCase__ : str = item.pop("""is_last""" )
                    accumulator.append(UpperCamelCase__ )
                    if is_last:
                        return accumulator
            else:
                lowerCamelCase__ : Optional[Any] = processed
                lowerCamelCase__ : Optional[int] = item.pop("""is_last""" )
                accumulator.append(UpperCamelCase__ )
        return accumulator
class _lowercase ( Dataset ):
    """Dataset view that extracts a single key from each (dict-like) element.

    Fixes in this revision:
    - the class inherited from itself; the intended base is
      ``torch.utils.data.Dataset``, imported at the top of this file;
    - ``__init__`` declared both parameters as ``UpperCamelCase__`` (duplicate
      argument names are a SyntaxError);
    - ``__getitem__`` read an undefined ``i`` instead of its index parameter.
    """

    def __init__( self , dataset , key ):
        self.dataset = dataset  # underlying indexable dataset of dict-like items
        self.key = key          # key extracted from each item
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        return self.dataset[i][self.key]


# public alias matching the upstream name (presumably `KeyDataset` — the class
# name itself was mangled to `_lowercase`)
KeyDataset = _lowercase
class _lowercase ( Dataset ):
    """Dataset view pairing two keys of each element as ``{"text", "text_pair"}``.

    Fixes in this revision:
    - the class inherited from itself; the intended base is
      ``torch.utils.data.Dataset``, imported at the top of this file;
    - ``__init__`` declared all three parameters as ``UpperCamelCase__``
      (duplicate argument names are a SyntaxError) and the two key attributes
      had collapsed into a single ``keya``, so both output fields read the same
      key; restored as ``keya`` / ``keyb``;
    - ``__getitem__`` read an undefined ``i`` instead of its index parameter.
    """

    def __init__( self , dataset , keya , keyb ):
        self.dataset = dataset  # underlying indexable dataset of dict-like items
        self.keya = keya        # key for the "text" field
        self.keyb = keyb        # key for the "text_pair" field
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}


# public alias matching the upstream name (presumably `KeyPairDataset` — the
# class name itself was mangled to `_lowercase`)
KeyPairDataset = _lowercase
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
_A : Optional[Any] =tuple[int, int]
class _lowercase :
    """Weighted undirected graph with Prim's minimum-spanning-tree algorithm.

    Fixes in this revision (the obfuscated original could not run):
    - both methods were named ``lowerCamelCase_`` (only the last survived)
      while the rest of this module calls ``add_edge`` and ``prims_algorithm``
      — those names are restored;
    - ``add_edge`` declared two parameters with the same name (SyntaxError)
      and had lost the ``self.edges[...] = weight`` store;
    - the ``__init__`` comprehension and the MST selection referenced the
      undefined ``UpperCAmelCase__`` instead of ``edge`` / the tracked
      minimum-weight edge.
    """

    def __init__( self , vertices , edges ):
        self.vertices = vertices
        # normalize each edge to (low, high) so (u, v) and (v, u) collide
        self.edges = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }

    def add_edge( self , edge , weight ):
        """Add an edge (and its endpoints) to the graph."""
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight

    def prims_algorithm( self ):
        """Return a minimum spanning tree of this graph (Prim's algorithm)."""
        subgraph = _lowercase({min(self.vertices )} , {} )
        while len(subgraph.vertices ) < len(self.vertices ):
            # sentinel strictly larger than every existing weight
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                # frontier edge: exactly one endpoint already inside the tree
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph


# The rest of this module refers to this class as ``Graph``.
Graph = _lowercase
def solution(filename: str = "p107_network.txt") -> int:
    """Solve Project Euler 107: the maximum saving achieved by replacing the
    given network with its minimum spanning tree.

    The network file is a comma-separated adjacency matrix with '-' marking
    absent edges; it is looked up next to this script.
    """
    # Bug fix: every intermediate result below was previously bound to a
    # throwaway local while later statements read the intended names; the
    # function name is restored to `solution` to match its call site.
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    # The matrix is symmetric, so only the strict lower triangle is read.
    for edge_b in range(1, len(adjacency_matrix)):
        for edge_a in range(edge_b):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a])

    # NOTE(review): `Graph` must resolve to the graph class defined above
    # (named `_lowercase` in this dump) — confirm the module-level binding.
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
# Script entry point: print the Project Euler 107 answer when run directly.
# NOTE(review): `solution` must be the name of the solver defined above —
# confirm the def name matches this call.
if __name__ == "__main__":
    print(F'{solution() = }')
| 700 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
# Environment-diagnostics script: prints interpreter, OS, torch and
# transformers details; each optional dependency degrades to `None`.
import os
import platform
import sys

# NOTE(review): assigning '3' to `_A` has no effect — this looks like a
# garbled `os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"` (quiet TensorFlow
# logging); confirm against the upstream script.
_A : Dict ='''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())

try:
    import torch

    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    # Torch not installed — report its absence instead of failing.
    print('''Torch version:''', None)

try:
    import transformers

    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 631 | 0 |
"""Repository hygiene check: flag file paths that contain uppercase letters,
spaces, or hyphens, or that sit at the repository root; exit non-zero if any
are found."""
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Bug fix: every result below was previously bound to a single `_A` name
# while the following statements read the intended variable names; the
# names are restored so the checks actually run.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Exit with the number of offending files so CI fails when any exist.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import structure for TrOCR: configuration and processor are always
# importable; the modeling code additionally requires torch.
# Bug fix: this dict was bound to `_A` while `_LazyModule` below reads
# `_import_structure`; the intended name is restored.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Bug fix: the modeling entries were assigned to an unrelated `_A` name;
    # they now extend the import structure as intended.
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Bug fix: the lazy module was bound to `_A` instead of replacing this
    # module in sys.modules, which is what makes lazy attribute access work.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 | 0 |
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the sorted-character signature of *word*; anagrams share it."""
    # Bug fix: the body previously read an undefined name instead of the
    # parameter; def names are restored to match the call sites below.
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list that is an anagram of *my_word*."""
    return word_by_signature[signature(my_word)]


# Bug fix: these module-level results were all bound to `_A` while the code
# below reads `data`, `word_list` and `all_anagrams`; names restored.
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

# Group every word under its signature for O(1) anagram lookup.
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 702 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration file.
_A : Union[str, Any] =logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config URL.
# NOTE(review): upstream binds the logger as `logger` and this map as
# AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP; both are
# collapsed to `_A` here — confirm the intended names.
_A : List[str] ={
    '''MIT/ast-finetuned-audioset-10-10-0.4593''': (
        '''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
    ),
}
class _lowercase(PretrainedConfig):
    """Configuration for the Audio Spectrogram Transformer (AST) model.

    Defaults reproduce the MIT/ast-finetuned-audioset-10-10-0.4593 layout.
    Bug fix: the constructor previously declared every parameter under one
    duplicated name (a SyntaxError) and stored nothing on `self`; parameter
    names are restored from the attribute assignments, and the base class is
    restored to the otherwise-unused `PretrainedConfig` import.
    """

    # Identifier used by AutoConfig; upstream binds it as `model_type`.
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Spectrogram patching parameters (patch grid over frequency/time).
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 631 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _lowercase:
    """Builds tiny GPT-J configs and inputs for the Flax model tests.

    Bug fix: the constructor previously declared all parameters under one
    duplicated name (a SyntaxError) and stored nothing on `self`; parameter
    names are restored from the attribute reads, and method names are
    restored from the call sites in the test-case class below.
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        # Special token ids are pinned to the last vocab entry.
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) for a tiny GPT-J."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )
        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        """Check that cached (incremental) decoding matches a full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        # All-ones mask sized to the cache length.
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )
        outputs = model(input_ids)
        # Compare the last-position logits of the cached vs. uncached runs.
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=F'''Max diff is {diff}''')

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        """Same as check_use_cache_forward, but with an explicit padding mask."""
        max_decoder_length = 20
        model = model_class_name(config)
        # Pad the provided mask with zeros out to the cache length.
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=F'''Max diff is {diff}''')
@require_flax
class _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    # Flax GPT-J test-case class.
    # NOTE(review): the two base mixins are garbled to `UpperCAmelCase_`
    # (upstream: FlaxModelTesterMixin, FlaxGenerationTesterMixin) — confirm.
    # NOTE(review): both attributes below bind the same name `a`, so the first
    # tuple is immediately shadowed, and the methods read
    # `self.all_model_classes`, which neither line defines — the attribute
    # names look garbled (upstream: `all_model_classes` /
    # `all_generative_model_classes`); confirm before relying on these tests.
    a = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    a = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def lowerCamelCase_ ( self: List[str] ):
        # Fixture construction (upstream: setUp).
        # NOTE(review): the tester is dropped into a local instead of
        # `self.model_tester`, which every other method reads — confirm.
        lowerCamelCase__ : List[str] = FlaxGPTJModelTester(self )

    def lowerCamelCase_ ( self: List[str] ):
        # Exercises cached (incremental) decoding for every model class.
        # NOTE(review): the `_lowercase` placeholders stand in for
        # (model_class_name, config, input_ids, attention_mask) — confirm.
        for model_class_name in self.all_model_classes:
            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(_lowercase , _lowercase , _lowercase , _lowercase )

    def lowerCamelCase_ ( self: List[Any] ):
        # Same check as above, but with an explicit attention mask.
        for model_class_name in self.all_model_classes:
            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                _lowercase , _lowercase , _lowercase , _lowercase )

    @tooslow
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Batched, left-padded generation against pinned expected strings.
        # NOTE(review): the locals read below (tokenizer, inputs, model,
        # jit_generate, ...) are never bound — every assignment targets a
        # throwaway `lowerCamelCase__`; the bindings look garbled.
        lowerCamelCase__ : Tuple = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
        lowerCamelCase__ : int = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=_lowercase , truncation=_lowercase )
        lowerCamelCase__ : List[str] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
        lowerCamelCase__ : str = False
        lowerCamelCase__ : Dict = model.config.eos_token_id
        lowerCamelCase__ : Union[str, Any] = jax.jit(model.generate )
        lowerCamelCase__ : Tuple = jit_generate(
            inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
        lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
        lowerCamelCase__ : Any = [
            """Hello this is a long string of text.\n\nI\'m trying to get the text of the""",
            """Hey, I\'m a little late to the party. I\'m going to""",
        ]
        self.assertListEqual(_lowercase , _lowercase )

    @is_pt_flax_cross_test
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # PT->Flax equivalence: convert PyTorch weights to Flax, compare the
        # last-position logits, then round-trip through save/from_pretrained.
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                lowerCamelCase__ : str = self._prepare_for_class(_lowercase , _lowercase )
                lowerCamelCase__ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                lowerCamelCase__ : Tuple = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                lowerCamelCase__ : List[str] = getattr(_lowercase , _lowercase )
                lowerCamelCase__ , lowerCamelCase__ : Any = pt_inputs["""input_ids"""].shape
                lowerCamelCase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                # Forces deterministic special-token positions in the masks.
                for batch_idx, start_index in enumerate(_lowercase ):
                    lowerCamelCase__ : int = 0
                    lowerCamelCase__ : Any = 1
                    lowerCamelCase__ : Any = 0
                    lowerCamelCase__ : Union[str, Any] = 1
                lowerCamelCase__ : Optional[Any] = pt_model_class(_lowercase ).eval()
                lowerCamelCase__ : List[str] = model_class(_lowercase , dtype=jnp.floataa )
                lowerCamelCase__ : Any = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _lowercase )
                lowerCamelCase__ : Optional[int] = fx_state
                with torch.no_grad():
                    lowerCamelCase__ : str = pt_model(**_lowercase ).to_tuple()
                lowerCamelCase__ : Any = fx_model(**_lowercase ).to_tuple()
                self.assertEqual(len(_lowercase ) , len(_lowercase ) , """Output lengths differ between Flax and PyTorch""" )
                for fx_output, pt_output in zip(_lowercase , _lowercase ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
                # Round-trip: save PT, reload as Flax, and re-compare.
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(_lowercase )
                    lowerCamelCase__ : Dict = model_class.from_pretrained(_lowercase , from_pt=_lowercase )
                    lowerCamelCase__ : Optional[Any] = fx_model_loaded(**_lowercase ).to_tuple()
                    self.assertEqual(
                        len(_lowercase ) , len(_lowercase ) , """Output lengths differ between Flax and PyTorch""" )
                    for fx_output_loaded, pt_output in zip(_lowercase , _lowercase ):
                        self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

    @is_pt_flax_cross_test
    def lowerCamelCase_ ( self: Dict ):
        # Flax->PT equivalence: load Flax weights into PyTorch, compare the
        # last-position logits, then round-trip through save/from_pretrained.
        lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                lowerCamelCase__ : Optional[int] = self._prepare_for_class(_lowercase , _lowercase )
                lowerCamelCase__ : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                lowerCamelCase__ : List[Any] = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                lowerCamelCase__ : List[Any] = getattr(_lowercase , _lowercase )
                lowerCamelCase__ : Union[str, Any] = pt_model_class(_lowercase ).eval()
                lowerCamelCase__ : Union[str, Any] = model_class(_lowercase , dtype=jnp.floataa )
                lowerCamelCase__ : str = load_flax_weights_in_pytorch_model(_lowercase , fx_model.params )
                lowerCamelCase__ , lowerCamelCase__ : Optional[int] = pt_inputs["""input_ids"""].shape
                lowerCamelCase__ : Any = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(_lowercase ):
                    lowerCamelCase__ : Union[str, Any] = 0
                    lowerCamelCase__ : int = 1
                    lowerCamelCase__ : List[Any] = 0
                    lowerCamelCase__ : int = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    lowerCamelCase__ : Union[str, Any] = pt_model(**_lowercase ).to_tuple()
                lowerCamelCase__ : List[Any] = fx_model(**_lowercase ).to_tuple()
                self.assertEqual(len(_lowercase ) , len(_lowercase ) , """Output lengths differ between Flax and PyTorch""" )
                for fx_output, pt_output in zip(_lowercase , _lowercase ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
                # Round-trip: save Flax, reload as PT, and re-compare.
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(_lowercase )
                    lowerCamelCase__ : int = pt_model_class.from_pretrained(_lowercase , from_flax=_lowercase )
                    with torch.no_grad():
                        lowerCamelCase__ : Optional[Any] = pt_model_loaded(**_lowercase ).to_tuple()
                    self.assertEqual(
                        len(_lowercase ) , len(_lowercase ) , """Output lengths differ between Flax and PyTorch""" )
                    for fx_output, pt_output in zip(_lowercase , _lowercase ):
                        self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

    @tooslow
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Smoke test: load the published checkpoint and run a 1x1 forward pass.
        for model_class_name in self.all_model_classes:
            lowerCamelCase__ : str = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
            lowerCamelCase__ : Union[str, Any] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_lowercase )
| 703 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Root folder that holds the maintained example scripts.
# NOTE(review): upstream names this PATH_TO_EXAMPLES and walks it in
# update_version_in_examples() — confirm the binding.
PATH_TO_EXAMPLES = "examples/"
# pattern name -> (compiled regex matching the versioned line, replacement
# template containing the literal placeholder VERSION).
# Bug fix: these tables were bound to `_A` while the functions below read
# REPLACE_PATTERNS / REPLACE_FILES / README_FILE; names restored.
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# pattern name -> file whose version line must be rewritten on release.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname: str, version: str, pattern: str):
    """Rewrite the version line of `fname` using the regex/replacement pair
    registered for `pattern` in REPLACE_PATTERNS.

    Bug fix: the three parameters previously shared one duplicated name (a
    SyntaxError) and every intermediate was dropped into a throwaway local;
    the function name is restored to match its call sites below.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    # Substitute the concrete version into the replacement template.
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version: str):
    """Update the pinned `check_min_version` in every maintained example.

    Bug fix: the walk previously started at the version string itself; it now
    walks the examples folder. NOTE(review): `PATH_TO_EXAMPLES` is the
    conventional upstream constant name — confirm the module binding.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False):
    """Propagate `version` to every registered file.

    Examples are skipped for patch releases (they only track minor versions).
    The function and parameter names are restored to match the
    `global_version_update(version, patch=...)` call sites below.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace `main`-branch doc links with stable links in the README model list.

    Bug fix: the prompts, line buffer, and indices were all dropped into
    throwaway locals while the loops below read the intended names; the
    README path is restored to the module-level README_FILE constant.
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index

    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current package version from the main __init__ file.

    Bug fix: the file contents and regex match were dropped into throwaway
    locals; the name is restored to match the `get_version()` call sites.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    """Bump the version everywhere for a release and clean the README list.

    For a dev version the base version is released; for a patch release the
    micro version is bumped; otherwise the minor version is bumped. The user
    is prompted to confirm (empty input accepts the default).
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Bump to the next dev version after a release.

    The user is prompted to confirm the proposed `X.(Y+1).0.dev0` version
    (empty input accepts the default).
    """
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Bug fix: the parser and parsed args were bound to `_A` while the code
    # below reads `parser` / `args`; the intended names are restored.
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 631 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
# (TF name fragment, HF name fragment) replacement pairs, applied in order to
# map Pegasus TF checkpoint keys onto HF state-dict keys.
# Bug fix: the list was bound to `_A` while rename_state_dict_key() below
# iterates a list named PATTERNS; the intended name is restored.
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    """Translate a TF Pegasus checkpoint key into the matching HF state-dict
    key by applying every (old, new) substring replacement in PATTERNS."""
    for pegasus_name, hf_name in PATTERNS:
        # Bug fix: the replacement result was dropped into a throwaway local,
        # so the function returned its input unchanged; it now accumulates
        # each substitution back into `k`.
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load TF weights into it.

    Bug fix: every intermediate (config, model, state dict, mapping, ...) was
    previously dropped into a throwaway local while later statements read the
    intended names; names are restored from those read sites.
    """
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            # Dense/projection kernels are stored transposed in TF.
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    # NOTE(review): upstream zeroes the pad row and ties both embed_tokens to
    # shared.weight — confirm these three assignment targets.
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    # Biases absent from the TF checkpoint are initialised to zeros.
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    """Read every variable of a TF checkpoint into a {name: ndarray} dict,
    skipping optimizer (Adafactor) slots and the global step counter.

    Bug fix: the weights dict and per-variable results were dropped into
    throwaway locals, so the function returned an undefined name; the
    accumulator is now actually populated and returned.
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    """Convert a TF Pegasus checkpoint (plus tokenizer) into an HF save_dir.

    The dataset name is derived from the checkpoint's parent folder and
    selects the task-specific config/tokenizer lengths.
    """
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        # NOTE(review): upstream stores the full task table on the "large"
        # config under this key — confirm.
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    # Static (sinusoidal) position embeddings are regenerated at load time,
    # so they are dropped from the serialized weights.
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    # Bug fix: the parser, parsed args, and derived save_dir were all bound
    # to `_A` while the code below reads `parser` / `args`; names restored,
    # and the computed default is written back onto `args.save_dir`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        # Default the output directory to pegasus/<dataset name>.
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 704 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
    """Checks that training steps driven by DDPM and DDIM schedulers produce
    identical noisy inputs and model predictions on identical data/seeds."""

    def get_model_optimizer( self: Dict , UpperCamelCase__: int = 32 ):
        """Return a freshly seeded UNet model and an SGD optimizer over it.

        Renamed from the garbled ``lowerCamelCase_`` so that the call
        ``self.get_model_optimizer(resolution=...)`` below actually resolves.
        """
        set_seed(0 )
        model = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        """Train once with DDPM noise and once with DDIM noise, then compare."""
        device = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        # clip_sample restored to False; the original passed the undefined
        # name ``UpperCamelCase__`` here.
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=False , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=False , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_000 , (4,) ).long().to(device ) for _ in range(4 )]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # The original compared ``UpperCamelCase__`` with itself (always True);
        # compare the last-step tensors from the two schedulers instead.
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1e-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1e-5 ) )
| 631 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> None:
    """Print the number of vertices on the longest path in a DAG (Kahn's algorithm).

    Args:
        UpperCamelCase: adjacency list mapping each vertex (0..n-1) to its successors.
    """
    graph = UpperCamelCase  # body previously relied on a same-named global
    indegree = [0] * len(graph )
    queue = []
    # every vertex is a path of length 1 by itself
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )  # was appending the whole graph (bug)
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            # relax: longest path to x via vertex
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )  # was appending the whole graph (bug)
    print(max(long_dist ) )  # was max(<graph>) (bug)
# Adjacency list of Graph
_A : List[Any] ={0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
# Call the function under its actual name in this module; the original line
# called the undefined identifiers ``longest_distance`` and ``graph``.
SCREAMING_SNAKE_CASE_(_A)
| 705 |
'''simple docstring'''
from statistics import mean
import numpy as np
def SCREAMING_SNAKE_CASE_ (process_name , arrival_time , burst_time , no_of_process ) -> list:
    """Calculate the turn-around time of each process under Highest Response
    Ratio Next (HRRN) scheduling.

    The original signature declared all four parameters as ``UpperCamelCase``
    (duplicate argument names are a SyntaxError); the positional order
    (process_name, arrival_time, burst_time, no_of_process) is preserved.

    Returns:
        list: turn-around time per process, in arrival order.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time )]
    process_name = [process_name[i] for i in np.argsort(arrival_time )]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # Advance the clock to the first unfinished arrival if the CPU is idle.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0 , no_of_process ):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time


# Backward-compatible descriptive alias: the __main__ block below calls
# ``calculate_turn_around_time`` and a later definition re-binds
# ``SCREAMING_SNAKE_CASE_``.
calculate_turn_around_time = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ (process_name , turn_around_time , burst_time , no_of_process ) -> list:
    """Calculate per-process waiting time: waiting = turn-around - burst.

    The original signature declared all four parameters as ``UpperCamelCase``
    (duplicate argument names are a SyntaxError); positional order preserved.
    """
    waiting_time = [0] * no_of_process
    for i in range(0 , no_of_process ):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


# Backward-compatible descriptive alias used by the __main__ block below.
calculate_waiting_time = SCREAMING_SNAKE_CASE_
if __name__ == "__main__":
    # Demo of HRRN scheduling. The original assigned every input to ``_A``
    # (each assignment clobbering the previous) and then referenced the
    # undefined descriptive names; restore real bindings.
    no_of_process = 5
    process_name = ['''A''', '''B''', '''C''', '''D''', '''E''']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
    for i in range(0, no_of_process):
        print(
            F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
            F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
        )
    print(F'average waiting time : {mean(waiting_time):.5f}')
    print(F'average turn around time : {mean(turn_around_time):.5f}')
| 631 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( __lowercase , __lowercase ):
    """Learnable per-dimension mean/std holder that normalizes and
    de-normalizes embedding tensors.

    NOTE(review): the three instance methods below all share the garbled name
    ``lowerCamelCase_``; later definitions shadow earlier ones, so only the
    last (de-normalize) is reachable by that name -- confirm the intended
    method names (likely ``to``/``scale``/``unscale``) against the original.
    """

    @register_to_config
    def __init__( self: int , UpperCamelCase__: int = 768 , ):
        super().__init__()
        # The original assigned these Parameters to throwaway locals; the
        # methods below read ``self.mean``/``self.std``, so store them.
        self.mean = nn.Parameter(torch.zeros(1 , UpperCamelCase__ ) )
        self.std = nn.Parameter(torch.ones(1 , UpperCamelCase__ ) )

    def lowerCamelCase_ ( self: Any , UpperCamelCase__: Optional[Union[str, torch.device]] = None , torch_dtype: Optional[torch.dtype] = None , ):
        """Move/cast the stored statistics and return self for chaining.

        The second parameter was also named ``UpperCamelCase__`` (duplicate
        argument names are a SyntaxError); renamed to ``torch_dtype``.
        """
        self.mean = nn.Parameter(self.mean.to(UpperCamelCase__ ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(UpperCamelCase__ ).to(torch_dtype ) )
        return self

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Tuple ):
        """Normalize: (embeds - mean) / std. (``embeds`` was an undefined name.)"""
        embeds = (UpperCamelCase__ - self.mean) * 1.0 / self.std
        return embeds

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: List[Any] ):
        """De-normalize: embeds * std + mean. (``embeds`` was an undefined name.)"""
        embeds = (UpperCamelCase__ * self.std) + self.mean
        return embeds
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 631 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_A : Any =logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowercase ( _lowercase ):
    """Text-guided inpainting pipeline: CLIPSeg segments the region described
    by ``text`` into a mask, which then drives a Stable Diffusion inpainting
    pass over ``image``.

    NOTE(review): obfuscation artifacts remain throughout -- locals assigned
    to ``lowerCamelCase__`` and several arguments passed as the undefined name
    ``__UpperCamelCase`` (e.g. in the ``deprecate(...)`` and
    ``register_modules(...)`` calls). These would raise NameError at runtime;
    confirm against the original community pipeline before relying on this.
    """
    def __init__( self: Any , UpperCamelCase__: CLIPSegForImageSegmentation , UpperCamelCase__: CLIPSegProcessor , UpperCamelCase__: AutoencoderKL , UpperCamelCase__: CLIPTextModel , UpperCamelCase__: CLIPTokenizer , UpperCamelCase__: UNetaDConditionModel , UpperCamelCase__: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase__: StableDiffusionSafetyChecker , UpperCamelCase__: CLIPImageProcessor , ):
        super().__init__()
        # Back-compat shim: rewrite legacy scheduler configs with steps_offset != 1.
        if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
            lowerCamelCase__ : Dict = (
                F'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
                F''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
                """to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
                """ in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
                """ it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
                """ file"""
            )
            deprecate("""steps_offset!=1""" , """1.0.0""" , __UpperCamelCase , standard_warn=__UpperCamelCase )
            lowerCamelCase__ : Optional[Any] = dict(scheduler.config )
            lowerCamelCase__ : List[str] = 1
            lowerCamelCase__ : Union[str, Any] = FrozenDict(__UpperCamelCase )
        # Back-compat shim: force skip_prk_steps=True for old PNDM configs.
        if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
            lowerCamelCase__ : Any = (
                F'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
                """ `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
                """ sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
                """ incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
                """ Hub, it would be very nice if you could open a Pull request for the"""
                """ `scheduler/scheduler_config.json` file"""
            )
            deprecate("""skip_prk_steps not set""" , """1.0.0""" , __UpperCamelCase , standard_warn=__UpperCamelCase )
            lowerCamelCase__ : int = dict(scheduler.config )
            lowerCamelCase__ : Optional[Any] = True
            lowerCamelCase__ : Dict = FrozenDict(__UpperCamelCase )
        # Running without a safety checker is allowed but loudly discouraged.
        if safety_checker is None:
            logger.warning(
                F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
                """ results in services or applications open to the public. Both the diffusers team and Hugging Face"""
                """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
                """ it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
                """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            segmentation_model=__UpperCamelCase , segmentation_processor=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Optional[Union[str, int]] = "auto" ):
        """Enable sliced attention computation; "auto" halves the head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowerCamelCase__ : List[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__UpperCamelCase )
    def lowerCamelCase_ ( self: str ):
        """Disable attention slicing (delegates with a None-ish slice size)."""
        self.enable_attention_slicing(__UpperCamelCase )
    def lowerCamelCase_ ( self: Optional[int] ):
        """Offload sub-models to CPU via accelerate to reduce GPU memory use."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        lowerCamelCase__ : int = torch.device("""cuda""" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(__UpperCamelCase , __UpperCamelCase )
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCamelCase_ ( self: int ):
        """Return the device hooks execute on (supports accelerate offload)."""
        if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(__UpperCamelCase , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    def __call__( self: Dict , UpperCamelCase__: Union[str, List[str]] , UpperCamelCase__: Union[torch.FloatTensor, PIL.Image.Image] , UpperCamelCase__: str , UpperCamelCase__: int = 512 , UpperCamelCase__: int = 512 , UpperCamelCase__: int = 50 , UpperCamelCase__: float = 7.5 , UpperCamelCase__: Optional[Union[str, List[str]]] = None , UpperCamelCase__: Optional[int] = 1 , UpperCamelCase__: float = 0.0 , UpperCamelCase__: Optional[torch.Generator] = None , UpperCamelCase__: Optional[torch.FloatTensor] = None , UpperCamelCase__: Optional[str] = "pil" , UpperCamelCase__: bool = True , UpperCamelCase__: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase__: int = 1 , **UpperCamelCase__: List[Any] , ):
        """Segment the region described by ``text`` with CLIPSeg, then run
        Stable Diffusion inpainting on that mask and return its output."""
        lowerCamelCase__ : str = self.segmentation_processor(
            text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
        lowerCamelCase__ : Dict = self.segmentation_model(**__UpperCamelCase )
        # Sigmoid over logits -> soft mask; moved to CPU numpy for PIL conversion.
        lowerCamelCase__ : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        lowerCamelCase__ : Any = self.numpy_to_pil(__UpperCamelCase )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        lowerCamelCase__ : Any = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , )
| 707 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Tester helper that builds small ViTMAE configs, inputs, and models for
    the unit tests below.

    NOTE(review): the ``__init__`` below declares every parameter as
    ``UpperCamelCase__`` -- duplicate argument names are a SyntaxError in
    Python -- and the constructor bodies assign to locals instead of ``self``
    attributes. The intended attribute names can be read off the right-hand
    comments; confirm against the original transformers test file.
    """
    def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ):
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : Optional[Any] = patch_size
        lowerCamelCase__ : Any = num_channels
        lowerCamelCase__ : Any = is_training
        lowerCamelCase__ : Union[str, Any] = use_labels
        lowerCamelCase__ : List[str] = hidden_size
        lowerCamelCase__ : List[str] = num_hidden_layers
        lowerCamelCase__ : List[Any] = num_attention_heads
        lowerCamelCase__ : str = intermediate_size
        lowerCamelCase__ : str = hidden_act
        lowerCamelCase__ : Any = hidden_dropout_prob
        lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase__ : List[Any] = type_sequence_label_size
        lowerCamelCase__ : int = initializer_range
        lowerCamelCase__ : List[str] = mask_ratio
        lowerCamelCase__ : List[Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        lowerCamelCase__ : str = (image_size // patch_size) ** 2
        lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    # Build (config, pixel_values, labels) for a test run.
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase__ : Any = self.get_config()
        return config, pixel_values, labels
    # Construct a small ViTMAEConfig from the tester's attributes.
    def lowerCamelCase_ ( self: str ):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    # Run the bare ViTMAEModel and check the hidden-state shape.
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int ):
        lowerCamelCase__ : Tuple = ViTMAEModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Run ViTMAEForPreTraining (RGB and greyscale) and check logits shapes.
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ):
        lowerCamelCase__ : int = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        lowerCamelCase__ : Any = (self.image_size // self.patch_size) ** 2
        lowerCamelCase__ : str = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        lowerCamelCase__ : Dict = 1
        lowerCamelCase__ : Optional[int] = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    # Repackage prepared inputs as the dict form used by the common tests.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs
        lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common-model-test suite for ViTMAE (ModelTesterMixin + PipelineTesterMixin).

    NOTE(review): the five class attributes below are all named ``a`` -- in
    the original they are distinct ModelTesterMixin settings
    (all_model_classes, pipeline_model_mapping, and three booleans); with the
    shared name only the last assignment survives. Several bodies also
    reference names garbled to ``UpperCamelCase__`` or lost to local
    assignments; confirm against the original transformers test file.
    """
    a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    a = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
    a = False
    a = False
    a = False
    a = False
    # setUp: build the model tester and config tester.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Tuple = ViTMAEModelTester(self )
        lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: Dict ):
        pass
    # Input embeddings should be a Module; output embeddings Linear or None.
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : str = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase__ : Optional[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
    # forward() signature must start with pixel_values.
    def lowerCamelCase_ ( self: List[Any] ):
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Any = model_class(UpperCamelCase__ )
            lowerCamelCase__ : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Any = [*signature.parameters.keys()]
            lowerCamelCase__ : List[str] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
    # PT/TF equivalence needs a fixed noise tensor so both see the same mask.
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ):
        # make masks reproducible
        np.random.seed(2 )
        lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        lowerCamelCase__ : Tuple = pt_noise
        super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    # save/load round-trip: outputs before and after must match closely.
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy()
            lowerCamelCase__ : List[str] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ )
                lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ )
                model.to(UpperCamelCase__ )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                # Make sure we don't have nans
                lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy()
                lowerCamelCase__ : Tuple = 0
                lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(UpperCamelCase__ , 1e-5 )
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: Any ):
        pass
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass
    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Optional[int] ):
        pass
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
    """Load the standard COCO fixture image used by the integration tests."""
    fixture_path = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
    return Image.open(fixture_path )
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test: run the pretrained facebook/vit-mae-base on a
    fixture image with a fixed noise mask and check a logits slice.

    NOTE(review): several locals were garbled to ``lowerCamelCase__`` and some
    call arguments to ``UpperCamelCase__``; confirm names against the original
    transformers test file before executing.
    """
    @cached_property
    def lowerCamelCase_ ( self: List[str] ):
        # Image processor for the pretrained checkpoint (None without vision deps).
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
    @slow
    def lowerCamelCase_ ( self: Tuple ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        lowerCamelCase__ : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = self.default_image_processor
        lowerCamelCase__ : List[str] = prepare_img()
        lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        lowerCamelCase__ : List[str] = ViTMAEConfig()
        lowerCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        lowerCamelCase__ : Any = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , noise=torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) )
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : str = torch.tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase__ ) , atol=1e-4 ) )
| 631 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> float:
    """Return the mean absolute deviation of the values about their mean.

    Args:
        UpperCamelCase: a non-empty sequence of numbers.

    Raises:
        ValueError: if the input sequence is empty.
    """
    # The original tested ``if not nums:`` -- a NameError, since the
    # parameter is named ``UpperCamelCase``.
    if not UpperCamelCase:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""" )
    average = sum(UpperCamelCase ) / len(UpperCamelCase )  # Calculate the average
    return sum(abs(x - average ) for x in UpperCamelCase ) / len(UpperCamelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 708 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase ( _lowercase ):
    """fsspec filesystem exposing a single compressed file as one uncompressed
    entry (base for the bz2/gzip/lz4/xz/zstd subclasses below).

    NOTE(review): obfuscation damage -- ``__init__`` and ``_open`` declare
    several parameters all named ``UpperCamelCase__`` (duplicate argument
    names are a SyntaxError), and attribute assignments (``self.file``,
    ``self.compressed_name``, ``self.uncompressed_name``, ``self.dir_cache``)
    were replaced by throwaway locals, so the cache built below is never
    stored on the instance. Confirm against the original
    ``datasets.filesystems.compression`` module.
    """
    a = """"""
    a = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    a = None # compression type in fsspec. ex: "gzip"
    a = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self: str , UpperCamelCase__: str = "" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , **UpperCamelCase__: List[Any] ):
        super().__init__(self , **UpperCamelCase__ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        lowerCamelCase__ : List[Any] = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        # Derive the uncompressed member name by stripping the extension.
        lowerCamelCase__ : str = os.path.basename(self.file.path.split("""::""" )[0] )
        lowerCamelCase__ : Union[str, Any] = (
            self.compressed_name[: self.compressed_name.rindex(""".""" )]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        lowerCamelCase__ : Tuple = None
    @classmethod
    def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase__: Optional[int] ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(UpperCamelCase__ ).lstrip("""/""" )
    def lowerCamelCase_ ( self: Tuple ):
        # Lazily build the single-entry directory cache.
        if self.dir_cache is None:
            lowerCamelCase__ : Dict = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
            lowerCamelCase__ : int = {f["""name"""]: f}
    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ):
        # Return the full decompressed content as bytes.
        return self.file.open().read()
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Optional[Any] , ):
        # Only binary reads are supported; text mode is layered on by fsspec.
        lowerCamelCase__ : Union[str, Any] = self._strip_protocol(UpperCamelCase__ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
# Concrete single-file compression filesystems. NOTE(review): in each class
# the three attributes are all named ``a`` -- originally they were distinct
# (protocol, compression, extension), so with the shared name only the last
# assignment (the extension) survives. Confirm against the original
# ``datasets.filesystems.compression`` module before relying on these.
class _lowercase ( _lowercase ):
    # bz2: protocol / fsspec compression codec / filename extension
    a = """bz2"""
    a = """bz2"""
    a = """.bz2"""
class _lowercase ( _lowercase ):
    # gzip: protocol / fsspec compression codec / filename extension
    a = """gzip"""
    a = """gzip"""
    a = """.gz"""
class _lowercase ( _lowercase ):
    # lz4: protocol / fsspec compression codec / filename extension
    a = """lz4"""
    a = """lz4"""
    a = """.lz4"""
class _lowercase ( _lowercase ):
    # xz: protocol / fsspec compression codec / filename extension
    a = """xz"""
    a = """xz"""
    a = """.xz"""
class _lowercase ( _lowercase ):
    """Zstandard single-file filesystem.

    NOTE(review): obfuscation damage -- the ``__init__`` parameters are
    duplicated ``UpperCamelCase__`` names (SyntaxError), and the WrappedFile
    class plus ``fixed_enter`` below were originally *nested inside*
    ``__init__`` (they close over ``_enter = self.file.__enter__``, here lost
    to a local); the flattened nesting shown is not runnable as-is. Confirm
    against the original ``datasets.filesystems.compression`` module.
    """
    a = """zstd"""
    a = """zstd"""
    a = """.zst"""
    def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , UpperCamelCase__: int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__: Dict , ):
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        lowerCamelCase__ : Tuple = self.file.__enter__
class _lowercase :
    # Proxy that forwards everything to the wrapped file object but owns
    # the context-manager protocol itself (see the fsspec issue above).
    def __init__( self: Optional[int] , UpperCamelCase__: Any ):
        lowerCamelCase__ : Optional[int] = file_
    def __enter__( self: List[Any] ):
        self._file.__enter__()
        return self
    def __exit__( self: Any , *UpperCamelCase__: str , **UpperCamelCase__: Any ):
        self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )
    def __iter__( self: Any ):
        return iter(self._file )
    def lowerCamelCase_ ( self: List[Any] ):
        # next() support, delegating to the wrapped file.
        return next(self._file )
    def __getattr__( self: List[str] , UpperCamelCase__: Dict ):
        return getattr(self._file , UpperCamelCase__ )
    def fixed_enter(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ):
        # Replacement __enter__ returning the wrapper instead of the raw reader.
        return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) )
    lowerCamelCase__ : Optional[Any] = fixed_enter
| 631 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    # Without sentencepiece the slow tokenizer class is unavailable.
    RemBertTokenizer = None

# NOTE: the constants below were all collapsed to the single name ``_A`` by the
# obfuscation, clobbering each other; the tokenizer class reads them under the
# names restored here.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """
    "Fast" RemBERT tokenizer backed by HuggingFace's *tokenizers* library.

    Fixes over the previous revision: the four methods all shared one name and
    clobbered each other, every ``def`` repeated a single parameter name
    (a SyntaxError), and the instance attributes were never actually assigned.
    """

    # Class attributes read by the PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving a slow vocabulary is only possible when the sentencepiece
        # model file is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model."""
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into ``save_directory`` and return its path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Avoid copying the file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
# ----- concatenation artifact removed (non-Python residue between files) -----
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below (they call
# ``logger.error``); the previous revision bound it to ``_A`` instead.
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    """
    Load a (possibly nested) YAML config file into a flat ``argparse.Namespace``
    whose attribute names are dotted paths, e.g. ``model.classification.name``.

    Renamed to match its call site; the nested helper previously declared the
    same parameter name three times (a SyntaxError).
    """
    print("""Loading config file...""")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Flatten nested mappings into dotted keys: {"a": {"b": 1}} -> {"a.b": 1}.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, """r""") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(config_path, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """
    Build a ``MobileViTVaConfig`` for the given conversion task.

    Renamed to match its call site; the previous revision never bound
    ``is_segmentation_model`` / ``idalabel`` / the config fields it later read.
    """
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset: pick label count, input resolution and the id2label mapping file.
    if task_name.startswith("""imagenet1k_"""):
        config.num_labels = 1000
        if int(task_name.strip().split("""_""")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_"""):
        config.num_labels = 21000
        if int(task_name.strip().split("""_""")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_"""):
        config.num_labels = 151
        config.image_size = 512
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_"""):
        config.num_labels = 21
        config.image_size = 512
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True

    # orig_config: read hyper-parameters from the original training YAML.
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, """model.classification.name""", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, """model.classification.mitv2.width_multiplier""", 1.0)
    assert (
        getattr(orig_config, """model.classification.mitv2.attn_norm_layer""", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, """model.classification.activation.name""", """swish""")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, """model.segmentation.output_stride""", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, """model.segmentation.deeplabv3.aspp_rates""", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, """model.segmentation.deeplabv3.aspp_out_channels""", 512)
            config.aspp_dropout_prob = getattr(orig_config, """model.segmentation.deeplabv3.aspp_dropout""", 0.1)

    # id2label: download the mapping from the shared label-files dataset repo.
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` in ``dct`` to key ``new`` (in place).

    Renamed to match its call site; the previous revision declared one
    parameter name three times and discarded the popped value.
    """
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """
    Return a list of ``(old_key, new_key)`` pairs mapping original MobileViTv2
    checkpoint parameter names to their HuggingFace counterparts.

    Args:
        state_dict: the original checkpoint's state dict (only keys are read).
        base_model: when True, omit the ``mobilevitv2.`` prefix (bare backbone).
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # Strip the original top-level "encoder." scope.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        # Generic layer-name translations.
        if ".block." in k:
            k_new = k_new.replace(""".block.""", """.""")
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""", """.convolution.""")
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""", """.normalization.""")

        # Stem and the two plain MobileNet stages.
        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""", f'''{model_prefix}conv_stem.''')
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''', f'''{model_prefix}encoder.layer.{i-1}.layer.''')
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""", """.expand_1x1.""")
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""", """.reduce_1x1.""")

        # The three MobileViT stages: downsampling + local representation convs.
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''', f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''')
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''', f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''')
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''', f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''')

        # Transformer blocks inside each MobileViT stage; the entry right after
        # the last transformer block (index j+1) is the final layernorm.
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''', f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.'''
                    )
            if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                k_new = k_new.replace(
                    f'''layer_{i}.1.global_rep.{j+1}.''', f'''{model_prefix}encoder.layer.{i-1}.layernorm.'''
                )

            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''', f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''')

        # Attention / feed-forward sub-layer names.
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""", """layernorm_before.""")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""", """attention.""")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""", """layernorm_after.""")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""", """ffn.conv1.""")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""", """ffn.conv2.""")

        # Heads.
        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""", """classifier.""")
        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""", """segmentation_head.""")
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""", """.""")
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""", """.""")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop the auxiliary segmentation-head weights (no HF counterpart) in place.

    Renamed to match its call site; the previous revision never collected the
    keys and passed the key as its own ``pop`` default.
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head."""):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        # Default of None keeps this tolerant of already-missing keys.
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO sanity-check image (two cats on a couch).

    Renamed to match its call site; ``stream`` was previously passed an
    undefined name instead of True.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """
    Convert an original MobileViTv2 checkpoint to the HuggingFace format,
    sanity-check its outputs on a test image, and save model + image processor.

    Renamed to match the __main__ call site; the previous revision never bound
    any of the locals it later used.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")

    # load huggingface model
    if task_name.startswith("""ade20k_""") or task_name.startswith("""voc_"""):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("""imagenet"""):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("""Predicted class:""", model.config.idalabel[predicted_class_idx])
        if task_name.startswith("""imagenet1k_256""") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # The previous revision assigned the parser and the parsed args to ``_A``
    # while the following lines used ``parser`` / ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
    Classification (ImageNet-1k)
        - MobileViTV2 (256x256) : imagenet1k_256
        - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
        - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
            imagenet21k_to_1k_256
        - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
            ImageNet-1k 384x384) : imagenet21k_to_1k_384
    Segmentation
        - ADE20K Dataset : ade20k_deeplabv3
        - Pascal VOC 2012 Dataset: voc_deeplabv3
    '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )

    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
# ----- concatenation artifact removed (non-Python residue between files) -----
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# The previous revision assigned both the logger and the archive map to the
# single name ``_A``, so the dict clobbered the logger.
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    """
    Configuration for a TrajectoryTransformer model (GPT-style transformer over
    (observation, action, reward, value) transition tokens).

    Fixes over the previous revision: the three class attributes all shared the
    name ``a`` (clobbering each other and hiding them from PretrainedConfig),
    ``__init__`` repeated one parameter name 23 times (a SyntaxError), and the
    instance attributes were never assigned.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the standard HF attribute names onto this config's GPT-style names.
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0_006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Loss weights for the action / reward / value token groups.
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        # Dimensions of one environment transition token group.
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    """ConfigTester specialization: MobileViTV2 lacks the usual common
    properties, so only check its distinctive ``width_multiplier`` attribute.

    Renamed to match its instantiation in the model test's setUp; the method
    previously referenced an undefined name instead of the created config.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """width_multiplier"""))
class MobileViTVaModelTester:
    """Builds tiny MobileViTV2 configs/inputs and checks model output shapes.

    Renamed to match its instantiation in the model test's setUp; ``__init__``
    previously repeated one parameter name (a SyntaxError) and never assigned
    the instance attributes the other methods read.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # The last stage's hidden size scales with the width multiplier.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        # Stored under the *_prob names because get_config reads them there.
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        # Forward without labels: logits only.
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        # Forward with labels: same logits shape, plus a loss.
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common model tests for MobileViTV2. MobileViTV2 does not use attention
    masks or inputs_embeds, so several common tests are skipped.

    Fixes over the previous revision: the class attributes were all named ``a``
    (clobbering each other), every test method shared the name
    ``lowerCamelCase_`` (only the last survived), and the nested hidden-states
    helper repeated one parameter name (a SyntaxError).
    """

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="""MobileViTV2 does not output attentions""")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the local COCO cats fixture image used by the integration tests.

    Renamed to match its call sites in the integration test class below.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests running real pretrained MobileViTV2 checkpoints.

    The previous revision gave all four methods the same name, so only the
    last one survived class creation.
    """

    @cached_property
    def default_image_processor(self):
        # Only available when the vision extras (PIL) are installed.
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # Post-processing runs on CPU tensors.
        outputs.logits = outputs.logits.detach().cpu()

        # Resized to an explicit target size...
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        # ...or kept at the model's native output resolution.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
# ----- concatenation artifact removed (non-Python residue between files) -----
from __future__ import annotations
def SCREAMING_SNAKE_CASE_(matrix) -> int:
    """Return the minimal path cost from the top-left to the bottom-right
    cell of ``matrix``, moving only right or down.

    The matrix is updated in place so that each cell holds the cheapest cost
    of reaching it.  (Fix: the original body referenced an undefined name
    ``matrix`` while its parameter was named differently, so every call
    raised ``NameError``; the stale ``-> Union[str, Any]`` annotation is
    corrected to ``int``.)

    >>> SCREAMING_SNAKE_CASE_([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    >>> SCREAMING_SNAKE_CASE_([[5]])
    5
    """
    # Prefix sums along the first row: cells reachable only by moving right.
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # Prefix sums along the first column: cells reachable only by moving down.
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # Every other cell: cheapest of arriving from above or from the left.
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest

    doctest.testmod()
| 711 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger for this tokenizer module.
_A : Optional[Any] =logging.get_logger(__name__)
# On-disk file names of the serialized tokenizer artefacts.
# NOTE(review): every constant below is scrambled to the same name ``_A``;
# upstream these are VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES -- confirm against the full file.
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Hosted tokenizer file for the GPT-NeoX-20B checkpoint.
_A : Tuple ={
    '''tokenizer_file''': {
        '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
    },
}
# Maximum input length (positional-embedding budget) per checkpoint.
_A : List[Any] ={
    '''gpt-neox-20b''': 2_048,
}
class _lowercase ( _lowercase ):
    """Fast (Rust-backed) byte-level BPE tokenizer for GPT-NeoX-20B.

    Fixes over the scrambled original: ``__init__`` had seven parameters all
    named ``UpperCamelCase__`` (a SyntaxError), and the pre-tokenizer state it
    computed was assigned to throwaway locals and never applied.  Parameter
    names are reconstructed from the keyword names in the ``super().__init__``
    call.

    NOTE(review): the four class attributes below are all scrambled to ``a``;
    upstream they are ``vocab_files_names``, ``pretrained_vocab_files_map``,
    ``max_model_input_sizes`` and ``model_input_names``.  The two methods are
    likewise both scrambled to ``lowerCamelCase_`` (the second shadows the
    first); names are kept to preserve the file's interface as-is.
    """

    a = VOCAB_FILES_NAMES
    a = PRETRAINED_VOCAB_FILES_MAP
    a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Build the fast tokenizer and honour ``add_prefix_space``."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # If the serialized pre-tokenizer disagrees with the requested
        # ``add_prefix_space``, rebuild it with the requested setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def lowerCamelCase_(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the tokenizer model files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def lowerCamelCase_(self, conversation: "Conversation"):
        """Build input ids for a conversation: each utterance is encoded and
        terminated with EOS; the result is left-truncated to the model max
        length so the most recent turns are kept."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 631 | 0 |
'''simple docstring'''
_A : Tuple ="0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the GIT model: heavy submodules are only
# imported when one of their attributes is first accessed (via _LazyModule).
# Fixes over the original: the import structure was bound to ``_A`` and then
# clobbered by the modeling list, the undefined name ``_import_structure``
# was passed to ``_LazyModule`` (NameError), and the lazy module object was
# never installed into ``sys.modules``.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: the modeling module is simply not registered.
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so imports above are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the Jukebox model (same _LazyModule pattern
# used across transformers).  Fixes over the original: the import structure
# was bound to ``_A`` and then clobbered by the modeling list, the undefined
# name ``_import_structure`` was passed to ``_LazyModule`` (NameError), and
# the lazy module object was never installed into ``sys.modules``.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: the modeling module is simply not registered.
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]
if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace this module with a lazy proxy so imports above are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Path of the toy sentencepiece model used as a fixture by these tests.
_A : int =get_tests_dir('''fixtures/test_sentencepiece.model''')
# Tokenizer config written next to the fixture vocabulary.
_A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''}
# Language-code token prepended for multilingual Marian checkpoints.
_A : int ='''>>zh<<'''
# Hub organisation prefix for the real Marian checkpoints.
_A : Dict ='''Helsinki-NLP/'''
# Pick the tensor framework available in the environment, preferring torch.
if is_torch_available():
    _A : List[Any] ='''pt'''
elif is_tf_available():
    _A : Optional[int] ='''tf'''
else:
    _A : Dict ='''jax'''
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
    """Test suite for ``MarianTokenizer`` (sentencepiece-backed).

    NOTE(review): local names in this scrambled source are collapsed to
    ``lowerCamelCase__`` while later statements reference the original
    upstream names (``save_dir``, ``tokenizer``, ``batch`` ...); the comments
    below describe the intended behaviour of each test.
    """

    a = MarianTokenizer
    a = False
    a = True

    def lowerCamelCase_ ( self: List[str] ):
        # setUp: build a tiny sentencepiece tokenizer fixture on disk.
        super().setUp()
        lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
        # Factory: load a tokenizer from the fixture directory.
        return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
        # Input/output text pair used by the common tokenizer tests.
        return (
            "This is a test",
            "This is a test",
        )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # ``</s>`` maps to id 0 and back.
        lowerCamelCase__ : Any = """</s>"""
        lowerCamelCase__ : List[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Vocabulary ordering: special tokens first, pad last, 9 entries total.
        lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(UpperCamelCase__ ) , 9 )

    def lowerCamelCase_ ( self: int ):
        # vocab_size matches the 9-entry fixture vocabulary.
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )

    def lowerCamelCase_ ( self: int ):
        # Pinned encoding with the hosted en-de checkpoint, then a
        # save_pretrained/from_pretrained round trip keeping source.spm.
        lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
        lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
        lowerCamelCase__ : List[str] = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
        self.assertIn("""source.spm""" , UpperCamelCase__ )
        MarianTokenizer.from_pretrained(UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        # Overlong input is truncated/padded to the model max length (512).
        lowerCamelCase__ : List[Any] = self.get_tokenizer()
        lowerCamelCase__ : Any = tok(
            ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch.input_ids.shape , (2, 512) )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Short inputs are padded only up to the longest batch element.
        lowerCamelCase__ : str = self.get_tokenizer()
        lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # Pinned-encoding integration test against a fixed en-de revision;
        # 58_100 is the pad id, masks mark real vs padded positions.
        # fmt: off
        lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )

    def lowerCamelCase_ ( self: List[str] ):
        # Separate source/target vocabularies: ``text`` vs ``text_target``
        # produce different ids, and decode with source tokenizer round-trips.
        lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        lowerCamelCase__ : str = """Tämä on testi"""
        lowerCamelCase__ : Any = """This is a test"""
        lowerCamelCase__ : int = [76, 7, 2_047, 2]
        lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
        lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def SCREAMING_SNAKE_CASE_(scheduler, num_steps=10) -> list:
    """Step ``scheduler`` ``num_steps`` times, recording the learning rate
    observed before each step.

    Fix over the original: both parameters were named ``UpperCamelCase``
    (a SyntaxError), the body referenced the undefined ``UpperCAmelCase__``,
    and the ``-> List[str]`` annotation referenced un-imported ``List``.
    """
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def SCREAMING_SNAKE_CASE_(scheduler, num_steps=10) -> list:
    """Step ``scheduler`` ``num_steps`` times, recording the learning rate
    before each step, and halfway through round-trip the scheduler state
    through ``torch.save``/``torch.load`` to verify schedules survive
    serialisation.

    Fix over the original: both parameters were named ``UpperCamelCase``
    (a SyntaxError), the body referenced the undefined ``UpperCAmelCase__``,
    and the save path / loaded state were bound to throwaway locals.
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            # Serialise and restore inside a throwaway directory; the
            # recorded learning rates must be unaffected.
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class _lowercase ( unittest.TestCase ):
    """Optimizer convergence tests: AdamW and Adafactor should each drive a
    3-parameter least-squares problem to its known target.

    NOTE(review): locals in this scrambled source are bound to
    ``lowerCamelCase__`` while later statements reference the upstream names
    (``w``, ``criterion``, ``loss``, ``optimizer``) and ``_A`` placeholders.
    """

    def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] ):
        # Element-wise almost-equality helper for two equally long lists
        # (upstream: assertListAlmostEqual(list1, list2, tol)).
        self.assertEqual(len(_A ) , len(_A ) )
        for a, b in zip(_A , _A ):
            self.assertAlmostEqual(_A , _A , delta=_A )

    def lowerCamelCase_ ( self: Optional[int] ):
        # AdamW: 100 steps of MSE descent should converge w -> target.
        lowerCamelCase__ : str = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_A )
        lowerCamelCase__ : Optional[int] = torch.tensor([0.4, 0.2, -0.5] )
        lowerCamelCase__ : Optional[Any] = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        lowerCamelCase__ : int = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            lowerCamelCase__ : Tuple = criterion(_A , _A )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )

    def lowerCamelCase_ ( self: Tuple ):
        # Adafactor: 1000 steps with fixed lr/clip settings should converge too.
        lowerCamelCase__ : Optional[int] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_A )
        lowerCamelCase__ : Dict = torch.tensor([0.4, 0.2, -0.5] )
        lowerCamelCase__ : Union[str, Any] = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        lowerCamelCase__ : Optional[int] = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_A , weight_decay=0.0 , relative_step=_A , scale_parameter=_A , warmup_init=_A , )
        for _ in range(1_000 ):
            lowerCamelCase__ : Optional[int] = criterion(_A , _A )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowercase ( unittest.TestCase ):
    """Learning-rate schedule tests: each factory's observed LR trajectory is
    compared against a pinned expectation, both directly and after a
    save/reload round trip (via the picklable lambda wrapper).

    NOTE(review): the three class attributes are all scrambled to ``a``;
    upstream they are ``m`` (a probe module), ``optimizer`` and
    ``num_steps`` -- the second attribute even references ``m``.
    """

    a = nn.Linear(50 , 50 ) if is_torch_available() else None
    a = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    a = 10

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: Dict=None ):
        # assertListAlmostEqual with an optional failure message.
        self.assertEqual(len(_A ) , len(_A ) )
        for a, b in zip(_A , _A ):
            self.assertAlmostEqual(_A , _A , delta=_A , msg=_A )

    def lowerCamelCase_ ( self: Optional[int] ):
        # Shared kwargs for the warmup-based schedules.
        lowerCamelCase__ : Tuple = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        lowerCamelCase__ : Union[str, Any] = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"""num_warmup_steps""": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, """num_cycles""": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"""num_warmup_steps""": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            lowerCamelCase__ , lowerCamelCase__ : List[Any] = data
            # First: the raw schedule matches the pinned trajectory.
            lowerCamelCase__ : Any = scheduler_func(self.optimizer , **_A )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lowerCamelCase__ : Dict = unwrap_schedule(_A , self.num_steps )
            self.assertListAlmostEqual(
                _A , _A , tol=1e-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
            # Second: the schedule survives a save/reload round trip.
            lowerCamelCase__ : Any = scheduler_func(self.optimizer , **_A )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(_A )  # wrap to test picklability of the schedule
            lowerCamelCase__ : Tuple = unwrap_and_save_reload_schedule(_A , self.num_steps )
            self.assertListEqual(_A , _A , msg=F'''failed for {scheduler_func} in save and reload''' )
class _lowercase :
'''simple docstring'''
def __init__( self: Any , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : Optional[Any] = fn
def __call__( self: Any , *UpperCamelCase__: List[str] , **UpperCamelCase__: Any ):
return self.fn(*_A , **_A )
@classmethod
def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Optional[Any] ):
lowerCamelCase__ : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
| 714 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for the RWKV configuration module.
_A : Optional[Any] =logging.get_logger(__name__)
# Map of canonical RWKV checkpoints to their hosted config files
# (upstream name: RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP).
_A : Optional[int] ={
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
    """Configuration class for RWKV models; defaults mirror RWKV/rwkv-4-169m-pile.

    Fixes over the scrambled original: ``__init__`` had twelve parameters all
    named ``UpperCamelCase__`` (a SyntaxError) and assigned every value to a
    throwaway local, so the config stored nothing.  Parameter names are
    reconstructed from the body's right-hand sides; default values are kept
    in their original order.

    NOTE(review): the two class attributes are both scrambled to ``a``;
    upstream they are ``model_type`` and ``attribute_map``.
    """

    a = """rwkv"""
    a = {"""max_position_embeddings""": """context_length"""}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Fall back to the hidden size / 4x hidden size when not overridden.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 631 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowercase :
def __init__(
    self,
    parent,
    batch_size=13,
    seq_length=7,
    is_training=True,
    use_input_mask=True,
    use_token_type_ids=True,
    use_labels=True,
    vocab_size=99,
    hidden_size=24,
    num_hidden_layers=2,
    num_attention_heads=6,
    intermediate_size=37,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=16,
    type_sequence_label_size=2,
    initializer_range=0.02,
    num_labels=3,
    scope=None,
    range_bbox=1_000,
):
    """Store the hyper-parameters of the tiny LiLT model used by the tests.

    Fix over the scrambled original: all 22 parameters were named
    ``UpperCamelCase__`` (a SyntaxError); names are reconstructed 1:1 from
    the body's attribute assignments, defaults kept in their original order.
    """
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.scope = scope
    self.range_bbox = range_bbox
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__ : Union[str, Any] = bbox[i, j, 3]
lowerCamelCase__ : Tuple = bbox[i, j, 1]
lowerCamelCase__ : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__ : Optional[int] = bbox[i, j, 2]
lowerCamelCase__ : List[str] = bbox[i, j, 0]
lowerCamelCase__ : Tuple = t
lowerCamelCase__ : List[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCamelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Dict = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase_ ( self: Dict ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: str , ):
lowerCamelCase__ : List[str] = LiltModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__UpperCamelCase , bbox=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
lowerCamelCase__ : List[str] = model(__UpperCamelCase , bbox=__UpperCamelCase , token_type_ids=__UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = model(__UpperCamelCase , bbox=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Dict , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Any , ):
lowerCamelCase__ : int = self.num_labels
lowerCamelCase__ : Dict = LiltForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase__ : Union[str, Any] = model(
__UpperCamelCase , bbox=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , ):
lowerCamelCase__ : int = LiltForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase__ : int = model(
__UpperCamelCase , bbox=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Dict = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Union[str, Any] = config_and_inputs
lowerCamelCase__ : int = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
    # NOTE(review): machine-mangled block kept byte-identical.  The three
    # `_lowercase` bases are presumably the transformers test mixins (e.g.
    # ModelTesterMixin / PipelineTesterMixin, the latter imported above), the
    # duplicate `UpperCamelCase__` parameter names are a SyntaxError, and the
    # `__UpperCamelCase` references are unresolved -- restore from the
    # original transformers Lilt test module before relying on this suite.

    # Model classes exercised by the common tests (torch only).
    a = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping used by the pipeline tester mixin.
    a = (
        {
            """feature-extraction""": LiltModel,
            """question-answering""": LiltForQuestionAnswering,
            """text-classification""": LiltForSequenceClassification,
            """token-classification""": LiltForTokenClassification,
            """zero-shot""": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags for the common test mixin (names mangled to `a`; each
    # reassignment shadows the previous one).
    a = False
    a = False

    def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Tuple ):
        # Pipeline-test gate: the original returns True unconditionally,
        # i.e. no pipeline test is skipped for this model.
        return True

    def lowerCamelCase_ ( self: Optional[int] ):
        # setUp: build the shared model tester and the config tester.
        lowerCamelCase__ : Any = LiltModelTester(self )
        lowerCamelCase__ : Dict = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )

    def lowerCamelCase_ ( self: List[Any] ):
        # Run the generic config round-trip checks.
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self: int ):
        # Base-model forward pass shape checks.
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCamelCase )

    def lowerCamelCase_ ( self: str ):
        # Re-run the model check for every position-embedding variant.
        lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCamelCase__ : Dict = type
            self.model_tester.create_and_check_model(*__UpperCamelCase )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # Token-classification head shape checks.
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )

    def lowerCamelCase_ ( self: Optional[int] ):
        # Question-answering head shape checks.
        lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )

    @slow
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Smoke-test loading the first published checkpoint from the hub.
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : List[Any] = LiltModel.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
@require_torch
@slow
class _lowercase ( unittest.TestCase ):
    """Integration test: run the pretrained Lilt base checkpoint on a tiny
    input and compare against reference activations."""

    # NOTE(review): method name was mangled (originally a `test_*` name, so
    # unittest discovery will not pick this up as-is); `torch_device` comes
    # from transformers.testing_utils in the original file -- confirm the
    # import survives above this chunk.
    def lowerCamelCase_ ( self: Optional[int] ):
        model = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        # Bug fix: the original used assertTrue(shape, expected), which treats
        # the second argument as a failure *message* and never compares.
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 715 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module constants below were mangled to `_A`, so the
# logger is immediately shadowed by the archive map; in the original file
# these are `logger` and `ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP` -- confirm.
_A : str =logging.get_logger(__name__)
_A : int ={
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
    """Configuration class for RoC-BERT (``model_type`` ``roc_bert``).

    NOTE(review): restored from a machine-mangled block — the original
    ``__init__`` had duplicate parameter names (a SyntaxError) and assigned
    to junk locals.  Parameter names come from the assignment right-hand
    sides and the defaults match the upstream RocBertConfig.
    """

    a = """roc_bert"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoC-BERT specific: auxiliary pronunciation / shape embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 631 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module constants below were mangled to `_A`, so the
# logger is immediately shadowed by the archive map; in the original file
# these are `logger` and `LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP` -- confirm.
_A : List[str] =logging.get_logger(__name__)
_A : Tuple ={
    '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class _lowercase ( _lowercase ):
    """Configuration class for LeViT (``model_type`` ``levit``).

    NOTE(review): restored from a machine-mangled block — the original
    ``__init__`` had duplicate parameter names (a SyntaxError) and assigned
    to junk locals.  Names come from the assignment right-hand sides; the
    mutable list defaults mirror the upstream LevitConfig.
    """

    a = """levit"""

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Downsampling ("Subsample") stage descriptors between the three
        # hidden-size stages.
        self.down_ops = [
            ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class _lowercase ( _lowercase ):
    """ONNX export configuration for LeViT.

    NOTE(review): in the mangled original, both properties were named
    ``lowerCamelCase_`` so the second silently shadowed the first; the
    OnnxConfig contract requires them to be ``inputs`` and
    ``atol_for_validation``, restored here.
    """

    a = version.parse("""1.11""" )

    @property
    def inputs(self):
        # Single pixel-tensor input with named dynamic axes.
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def atol_for_validation(self):
        # Absolute tolerance used when validating the exported model.
        return 1e-4
| 716 |
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of the segment joining points *pa* and *pb*.

    Bug fix: the mangled original had two parameters with the same name
    (a SyntaxError) and averaged one point with itself; the recursive
    calls below reference this function as ``get_mid``.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(
    vertexa: tuple[float, float],
    vertexb: tuple[float, float],
    vertexc: tuple[float, float],
    depth: int,
) -> None:
    """Draw the triangle (vertexa, vertexb, vertexc) with the global
    ``my_pen`` turtle, then recurse on the three corner sub-triangles to
    render a Sierpinski fractal of the given *depth*.

    NOTE(review): restored from a mangled block whose duplicate parameter
    names were a SyntaxError; the recursive calls already used the names
    ``triangle`` and ``get_mid``.
    """
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    # Trace the outline of the current triangle.
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])

    if depth == 0:
        return

    # Recurse on the three half-size triangles anchored at each vertex.
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexb, vertexa), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexa), get_mid(vertexc, vertexb), depth - 1)
if __name__ == "__main__":
    # Exactly one CLI argument: the recursion depth of the fractal.
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    # NOTE(review): restored names -- the mangled original bound these to
    # `_A` while the code below reads `my_pen` and `vertices`.
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 | 0 |
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: sorts *list_data* in place and returns it.

    *length* is the size of the still-unsorted prefix; 0 (the default)
    means "use the whole list".  Bug fix: the mangled original assigned
    the swap to junk locals, so no element was ever actually swapped.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # If a pass made no swap the list is sorted; otherwise bubble the next
    # largest element into place on a shorter prefix.
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 717 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds a small ConvBERT config plus dummy TF inputs and checks the
    output shapes of every TF ConvBERT head.

    NOTE(review): restored from a machine-mangled block (duplicate
    parameter names, junk assignment targets).  Class and method names are
    taken from the call sites in the accompanying test class
    (``TFConvBertModelTester(self)``, ``create_and_check_*`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # The original body deliberately ignores most constructor arguments
        # and pins fixed sizes (quirk of the source file, kept as-is).
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask,
        sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the base model with dict, list, and bare-tensor inputs."""
        model = TFConvBertModel(config=config)
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the masked-LM head emits vocab-sized per-token logits."""
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the sequence-classification head emits num_labels logits."""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the multiple-choice head over num_choices tiled inputs."""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Tile every input num_choices times along a new axis 1.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head emits per-token logits."""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the QA head emits start/end logits of seq_length size."""
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    # NOTE(review): machine-mangled block kept byte-identical.  The two
    # `_lowercase` bases are presumably TFModelTesterMixin and
    # PipelineTesterMixin (imported above); every method was renamed to
    # `lowerCamelCase_` (so unittest discovery will not run them) and the
    # `UpperCamelCase__` references are unresolved -- restore from the
    # original transformers TFConvBert test module before trusting this.

    # All TF ConvBERT model classes exercised by the common tests.
    a = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline tester mixin.
    a = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Feature flags for the common mixin (names mangled to `a`; each
    # reassignment shadows the previous one).
    a = False
    a = False
    a = False

    def lowerCamelCase_ ( self: str ):
        # setUp: build the shared model tester and the config tester.
        lowerCamelCase__ : Dict = TFConvBertModelTester(self )
        lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def lowerCamelCase_ ( self: List[str] ):
        # Run the generic config round-trip checks.
        self.config_tester.run_common_tests()

    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )

    @slow
    def lowerCamelCase_ ( self: Optional[Any] ):
        # Round-trip each model through a TF SavedModel export and verify
        # hidden-state / attention output shapes survive reloading.
        lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : Tuple = True
        if hasattr(UpperCamelCase__ , """use_cache""" ):
            lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
                lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" )
                lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ )
                lowerCamelCase__ : Any = model(UpperCamelCase__ )
                if self.is_encoder_decoder:
                    lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""]
                    lowerCamelCase__ : Any = outputs["""encoder_attentions"""]
                else:
                    lowerCamelCase__ : int = outputs["""hidden_states"""]
                    lowerCamelCase__ : Optional[int] = outputs["""attentions"""]
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                lowerCamelCase__ : Union[str, Any] = getattr(
                    self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                # ConvBERT splits heads between self-attention and convolution,
                # hence num_attention_heads / 2 in the expected shape.
                self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def lowerCamelCase_ ( self: int ):
        # Smoke-test loading the published checkpoint from the hub.
        lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(UpperCamelCase__ )

    def lowerCamelCase_ ( self: List[str] ):
        # Verify attention outputs are produced (and correctly shaped) both
        # when requested per-call and when enabled via the config.
        lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )

        def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ):
            lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
            self.assertEqual(out_len % 2 , 0 )
            lowerCamelCase__ : Any = outputs.decoder_attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(UpperCamelCase__: List[str] ):
            lowerCamelCase__ : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            lowerCamelCase__ : Any = False
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            if self.is_encoder_decoder:
                lowerCamelCase__ : str = model_class(UpperCamelCase__ )
                lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
                check_decoder_attentions_output(UpperCamelCase__ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ : Optional[int] = True
            lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            # Check attention is always last and order is fine
            lowerCamelCase__ : List[Any] = True
            lowerCamelCase__ : int = True
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
            self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
@require_tf
class _lowercase ( unittest.TestCase ):
    """Integration test: run the pretrained TF ConvBERT base checkpoint on a
    tiny input and compare against reference activations."""

    # NOTE(review): method name was mangled (originally a `test_*` name, so
    # unittest discovery will not pick this up as-is).
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        # Bug fix: the mangled original compared against an undefined name.
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 631 | 0 |
"""Backtracking N-queens solver."""
from __future__ import annotations

# Collected list of solved boards; appended to by solve() and reported
# by the final print.  (Mangled original bound this to `_A` while the
# code below reads `solution`.)
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen placed at (row, column) is not attacked.

    Checks the row, the column, and the two upward diagonals (queens are
    placed top-down, so squares below `row` are still empty).
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row via backtracking.

    Records and prints every complete placement (appending to the global
    ``solution`` list).  Bug fix: the mangled original assigned the queen
    placement/removal to junk locals instead of ``board[row][i]``.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1  # place a queen and recurse
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False
def printboard(board: list[list[int]]) -> None:
    """Print the board: 'Q' for a queen, '.' for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("""Q""" , end=""" """ )
            else:
                print(""".""" , end=""" """ )
        print()
# n=int(input("The no. of queens"))
# NOTE(review): restored names -- the mangled original bound both constants
# to `_A` while the calls below read `n` and `board`.
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 718 |
'''simple docstring'''
# NOTE(review): mangled constant name -- in the original accelerate package
# __init__ this is `__version__`; nothing in the visible file reads `_A`.
_A : List[str] ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 631 | 0 |
'''simple docstring'''
def simplify(current_set: list) -> list:
    """One Gaussian-elimination pass: normalize each row by its leading
    coefficient, subtract the first row from the others to cancel the first
    column, then recurse on the remaining sub-system until rows are 3 wide.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                row[column_index] = column
                continue
            row[column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list) -> list:
    """Solve n simultaneous linear equations, each given as n+1 coefficients
    (the last entry is the right-hand side), via `simplify` followed by
    back-substitution. Results are rounded to 5 decimal places.

    Raises:
        IndexError: wrong number of rows or row width.
        ValueError: non-numeric entries, or no zero-free row to pivot on.
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        # Move a row without zeros to the front so normalization is safe.
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    # Back-substitute from the last (shortest) row upward.
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Renamed from the corrupted `_A` to `eq`, the name used in the call below.
eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL. NOTE(review): both module globals were
# corrupted to `_A` (the second shadowing the first); names restored per the
# standard HF config-module layout -- confirm against upstream.
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowercase(PretrainedConfig):
    """Configuration for a TrOCR decoder.

    Stores the decoder hyper-parameters and forwards the special-token ids to
    ``PretrainedConfig``. The original base class was the class itself (a
    NameError); ``PretrainedConfig`` is the import this module provides for
    exactly this purpose.
    """

    # The corrupted source bound all three of these to one name `a`; the
    # values match the standard PretrainedConfig protocol attributes.
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Parameter names reconstructed from the right-hand sides of the
        # original assignments (the corrupted signature reused one name for
        # every parameter, a SyntaxError).
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 631 | 0 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    """Session-scoped in-memory `datasets.Dataset` with 10 identical rows.

    Renamed to `dataset`: the arrow-file fixture below injects a fixture of
    that name via its parameter list.
    """
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory, dataset):
    """Materialize the `dataset` fixture to an Arrow cache file; return the path.

    Parameter names reconstructed from the body (the corrupted signature
    repeated one name, a SyntaxError).
    """
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
_A : List[str] ="""\
Text data.
Second line of data."""
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Write FILE_CONTENT to a session temp file and return its path."""
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Write FILE_CONTENT bz2-compressed and return the path."""
    import bz2  # the corrupted `import bza` is not a real module

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    # Presumably FILE_CONTENT, matching the sibling gzip fixture -- TODO confirm.
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Write FILE_CONTENT gzip-compressed and return the path."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Write FILE_CONTENT lz4-compressed (when lz4 is available); return the path."""
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame  # the corrupted `import lza.frame` is not a real module

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory, text_file):
    """7-zip the plain text-file fixture (when py7zr is available); return the path.

    NOTE(review): the second parameter is presumably the text-file fixture --
    confirm its fixture name. The `return path` sits at function level as in
    the original, so it raises NameError when py7zr is unavailable.
    """
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr  # the corrupted `import pyazr` is not a real module

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory, text_file):
    """Tar the plain text-file fixture and return the archive path.

    NOTE(review): second parameter presumed to be the text-file fixture --
    confirm against the test suite.
    """
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Write FILE_CONTENT xz-compressed and return the path."""
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory, text_file):
    """Zip the plain text-file fixture and return the archive path.

    NOTE(review): second parameter presumed to be the text-file fixture --
    confirm against the test suite.
    """
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Write FILE_CONTENT zstd-compressed (when zstandard is available); return the path."""
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]:
lowerCamelCase__ : str = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
lowerCamelCase__ : Optional[int] = textwrap.dedent(
"""\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>""" )
with open(UpperCamelCase__ , """w""" ) as f:
f.write(UpperCamelCase__ )
return filename
# Shared row data for the csv/json/sql fixtures; names restored from the
# references in the fixture bodies below (`DATA`, `DATA_DICT_OF_LISTS`,
# `DATA_312`, `DATA_STR`). The corrupted source bound all five to `_A`.
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
# NOTE(review): not referenced in the visible chunk; `DATA2` is a guess -- confirm.
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_():
    """Return the raw dict-of-lists test data."""
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Materialize DATA_DICT_OF_LISTS to an Arrow cache file and return its path."""
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Create a SQLite database containing the DATA rows and return its path."""
    import sqlite3  # the module-level `import sqlitea` is not a real module

    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Write the DATA rows to a CSV file and return its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Write the DATA rows to a second CSV file (dataset2.csv) and return its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> str:
import bza
lowerCamelCase__ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase__ : List[Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(UpperCamelCase__ , """wb""" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
lowerCamelCase__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(UpperCamelCase__ , """w""" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCamelCase__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(UpperCamelCase__ , """w""" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
lowerCamelCase__ : str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(UpperCamelCase__ , """w""" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase__ ) ) )
f.write(UpperCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope="session")
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Write the DATA rows to a Parquet file and return its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),  # corrupted `pa.intaa()` is not a pyarrow API; int64 matches the data
            "col_3": pa.float64(),  # likewise for `pa.floataa()`
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict:
lowerCamelCase__ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCamelCase__ : Dict = {"""data""": DATA}
with open(UpperCamelCase__ , """w""" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCamelCase__ : List[str] = {"""data""": DATA_DICT_OF_LISTS}
with open(UpperCamelCase__ , """w""" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
lowerCamelCase__ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(UpperCamelCase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(UpperCamelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple:
lowerCamelCase__ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(UpperCamelCase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(UpperCamelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[int]:
lowerCamelCase__ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(UpperCamelCase__ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(UpperCamelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict:
lowerCamelCase__ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(UpperCamelCase__ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(UpperCamelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
import gzip
lowerCamelCase__ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(UpperCamelCase__ , """rb""" ) as orig_file:
with gzip.open(UpperCamelCase__ , """wb""" ) as zipped_file:
zipped_file.writelines(UpperCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> List[str]:
import gzip
lowerCamelCase__ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(UpperCamelCase__ , """rb""" ) as orig_file:
with gzip.open(UpperCamelCase__ , """wb""" ) as zipped_file:
zipped_file.writelines(UpperCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCamelCase__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(UpperCamelCase__ , """w""" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCamelCase__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(UpperCamelCase__ , """w""" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join("""nested""" , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
lowerCamelCase__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(UpperCamelCase__ , """w""" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase__ ) ) )
f.write(UpperCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCamelCase__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(UpperCamelCase__ , """w""" ) as f:
f.add(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.add(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCamelCase__ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(UpperCamelCase__ , """w""" ) as f:
f.add(UpperCamelCase__ , arcname=os.path.join("""nested""" , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[Any]:
lowerCamelCase__ : Dict = ["""0""", """1""", """2""", """3"""]
lowerCamelCase__ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(UpperCamelCase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict:
lowerCamelCase__ : Union[str, Any] = ["""0""", """1""", """2""", """3"""]
lowerCamelCase__ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(UpperCamelCase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]:
lowerCamelCase__ : Optional[int] = ["""0""", """1""", """2""", """3"""]
lowerCamelCase__ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(UpperCamelCase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCamelCase__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(UpperCamelCase__ , """w""" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(UpperCamelCase__ , """w""" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase__ ) ) )
f.write(UpperCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
lowerCamelCase__ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(UpperCamelCase__ , """w""" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(UpperCamelCase__ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple:
lowerCamelCase__ : int = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
lowerCamelCase__ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ () -> str:
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCamelCase__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(UpperCamelCase__ , """w""" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple:
lowerCamelCase__ : Optional[Any] = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
| 720 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Return True if `graph` (adjacency list, vertices 0..n-1) is bipartite.

    Two-colors each connected component with DFS, then verifies that no edge
    joins two vertices of the same color. (Renamed from the corrupted
    `SCREAMING_SNAKE_CASE_` to match the call site; the inner `dfs` had both
    parameters sharing one name — a SyntaxError — now restored.)
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Color vertex v with c and propagate the opposite color to neighbors.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 631 | 0 |
'''simple docstring'''
import os
from math import logaa
def solution(base_exp: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-based line number of the `base,exponent`
    pair with the greatest value, compared via exponent * log10(base).

    NOTE(review): the path resolves as join(dirname(base_exp), base_exp),
    which collapses to base_exp itself for relative paths and passes absolute
    paths through; upstream presumably intended dirname(__file__) -- confirm
    before changing. Renamed to `solution` to match the __main__ call below.
    """
    from math import log10  # the module-level `from math import logaa` is broken

    largest = 0
    result = 0
    # `with` ensures the data file is closed (the original leaked the handle).
    with open(os.path.join(os.path.dirname(base_exp), base_exp)) as data_file:
        for i, line in enumerate(data_file):
            a, x = list(map(int, line.split(",")))
            if x * log10(a) > largest:
                largest = x * log10(a)
                result = i + 1
    return result
if __name__ == "__main__":
    # Print the 1-based line index of the largest base/exponent pair.
    print(solution())
| 721 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase(Dataset):
    """Map-style dataset that applies a preprocess function on access.

    The original base class was the class itself (a NameError); `Dataset`
    is the torch import this module provides for this role. Parameter and
    attribute names are reconstructed from the right-hand sides and the
    `self.dataset` / `self.process` / `self.params` uses below (the corrupted
    __init__ repeated one parameter name, a SyntaxError).
    """

    def __init__(self, dataset, process, params):
        self.dataset = dataset  # underlying indexable data
        self.process = process  # callable applied to each item
        self.params = params    # keyword arguments forwarded to `process`

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class _lowercase(IterableDataset):
    """Iterator that runs `infer` over items from `loader`, optionally
    unrolling batched model outputs into batch_size=1 items.

    NOTE(review): reconstructed from a corrupted source whose __init__
    repeated one parameter name (a SyntaxError) and whose method names were
    destroyed; `__next__` / `loader_batch_item` and the `ModelOutput` /
    `tuple` isinstance checks follow the surviving comments -- confirm
    against upstream. Base class restored to `IterableDataset` (the original
    inherited from itself).
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping for the batch currently being unrolled.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Slice item `self._loader_batch_index` out of the current batch,
        keeping each tensor shaped like a batch of one."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class _lowercase ( _lowercase ):
    """Iterator that flattens the sub-iterators produced by ``infer`` into one
    stream (lists of lists -> single list, but with generators).

    Fix: ``__init__`` had the same name for all four parameters (a SyntaxError)
    and ``self.iterator`` / ``self.subiterator`` were never actually assigned.
    """

    def __init__( self: List[str] , loader: Any , infer: List[str] , params: Optional[int] , loader_batch_size: List[Any] = None ):
        # `loader_batch_size` is accepted for signature compatibility with the
        # sibling iterators but chunk iterators never unroll batches, so it is
        # deliberately not forwarded to the parent.
        super().__init__(loader, infer, params)

    def __iter__( self: Union[str, Any] ):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def lowerCamelCase_ ( self: str ):
        # Presumably ``__next__`` (renamed by obfuscation).
        if self.subiterator is None:
            # First call: start the sub-iterator for the first loader item.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class _lowercase ( _lowercase ):
    """Iterator that regroups flattened chunk items back into their original
    ``process`` boundaries using the required ``is_last`` marker.

    Fix: ``is_last`` / ``accumulator`` / ``item`` / ``processed`` and the
    ``self._loader_batch_*`` state were never bound (assignments went to a
    throwaway local), so the method raised NameError as written.
    """

    def __iter__( self: List[Any] ):
        self.iterator = iter(self.loader)
        return self

    def lowerCamelCase_ ( self: Tuple ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # Finish unrolling the batch we were in the middle of.
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("""is_last""")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                # Try to infer the size of the batch from the first tensor.
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self.loader_batch_size = self.loader_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("""is_last""")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("""is_last""")
                accumulator.append(item)
        return accumulator
class _lowercase ( _lowercase ):
    """Dataset wrapper exposing a single field of every row of the wrapped dataset.

    Fix: ``__init__`` declared both parameters with the same name (a SyntaxError)
    and ``__getitem__`` read an undefined ``i``.
    """

    def __init__( self: Optional[int] , dataset: Dataset , key: str ):
        self.dataset = dataset  # indexable dataset whose rows are dict-like
        self.key = key          # field extracted from each row

    def __len__( self: Optional[Any] ):
        return len(self.dataset)

    def __getitem__( self: List[str] , i: Any ):
        return self.dataset[i][self.key]
class _lowercase ( _lowercase ):
    """Dataset wrapper pairing two fields of every row as ``{"text", "text_pair"}``
    (the shape expected by sequence-pair tokenization).

    Fix: the two key parameters had been collapsed into one repeated name
    (a SyntaxError) and ``__getitem__`` read an undefined ``i``; the second
    key is restored as a distinct attribute.
    """

    def __init__( self: Optional[int] , dataset: Dataset , keya: str , keyb: str ):
        self.dataset = dataset
        self.keya = keya  # field used as "text"
        self.keyb = keyb  # field used as "text_pair"

    def __len__( self: str ):
        return len(self.dataset)

    def __getitem__( self: List[str] , i: Union[str, Any] ):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 631 | 0 |
'''simple docstring'''
def hexagonal_numbers(length) -> list[int]:
    """Return the first ``length`` hexagonal numbers h(n) = n * (2n - 1).

    Fix: the parameter was named inconsistently with the body (which read an
    undefined ``length``) and the function name did not match its own call
    sites below. The type check now runs before the comparison so a non-int
    argument raises ValueError instead of TypeError.

    :param length: number of terms to generate; must be a positive int.
    :raises ValueError: if ``length`` is not a positive integer.
    """
    if not isinstance(length, int) or length <= 0:
        raise ValueError("""Length must be a positive integer.""")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 700 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# NOTE(review): `_A` is never read, so this assignment has no effect. Upstream
# versions of this script set os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" to
# silence TensorFlow logging — presumably this is a mangled remnant of that;
# verify against the original script.
_A : Dict ='''3'''
# Interpreter / OS details, printed unconditionally.
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
# Torch details are optional: if torch is not installed, report None instead
# of failing so the environment report is still usable.
try:
    import torch

    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)
# transformers is likewise optional.
try:
    import transformers

    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 631 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[Any] =logging.get_logger()
@dataclass
class _lowercase :
    """Records, via forward hooks, the modules executed by one forward pass."""

    # NOTE(review): these field declarations look mangled — three assignments to
    # the same un-annotated name `a`, so none is a real dataclass field. The
    # method bodies read `self.module`, `self.traced` and `self.handles`, which
    # were presumably the original fields — verify against upstream.
    a = 42
    a = field(default_factory=_lowercase )
    a = field(default_factory=_lowercase )

    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: List[Any] , UpperCamelCase__: Tensor , UpperCamelCase__: Tensor ):
        # Forward-hook signature (module, inputs, outputs): keep only modules
        # that have no submodules, plus convolutions and batch norms.
        # NOTE(review): `m` and `has_not_submodules` are not bound in this scope
        # (the assignment below binds a throwaway local instead) — this would
        # raise NameError if executed as written.
        lowerCamelCase__ : List[str] = len(list(m.modules() ) ) == 1 or isinstance(UpperCamelCase__ , nn.Convad ) or isinstance(UpperCamelCase__ , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(UpperCamelCase__ )

    def __call__( self: Optional[int] , UpperCamelCase__: Tensor ):
        # Register the hook on every submodule, run one forward pass to populate
        # `self.traced`, then remove all hooks again.
        # NOTE(review): the hook is registered as `self._forward_hook`, which does
        # not match the method name above — presumably more renaming damage.
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(UpperCamelCase__ )
        [x.remove() for x in self.handles]
        return self

    @property
    def lowerCamelCase_ ( self: List[Any] ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda UpperCamelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _lowercase :
    """Copies weights from a source module to a destination module by tracing
    both with the same input and transferring state dicts operation-by-operation."""

    # NOTE(review): field names mangled as in the tracker class above; the body
    # reads `self.src`, `self.dest`, `self.src_skip`, `self.dest_skip`,
    # `self.verbose` and `self.raise_if_mismatch` — presumably the original
    # fields (verbose default 1, raise_if_mismatch default True). Verify.
    a = 42
    a = 42
    a = 1
    a = field(default_factory=_lowercase )
    a = field(default_factory=_lowercase )
    a = True

    def __call__( self: Union[str, Any] , UpperCamelCase__: Tensor ):
        # Trace both modules with the same input, filter out skipped op types,
        # then pairwise-copy state dicts from source ops to destination ops.
        # NOTE(review): the traced/filtered lists are bound to a throwaway local
        # while the length check and the zip below read the function argument —
        # renaming damage; as written this does not pair source with destination.
        lowerCamelCase__ : Dict = Tracker(self.dest )(UpperCamelCase__ ).parametrized
        lowerCamelCase__ : str = Tracker(self.src )(UpperCamelCase__ ).parametrized
        lowerCamelCase__ : Any = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.src_skip , UpperCamelCase__ ) )
        lowerCamelCase__ : Optional[int] = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.dest_skip , UpperCamelCase__ ) )
        if len(UpperCamelCase__ ) != len(UpperCamelCase__ ) and self.raise_if_mismatch:
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(UpperCamelCase__ )} operations while'''
                F''' destination module has {len(UpperCamelCase__ )}.''' )
        for dest_m, src_m in zip(UpperCamelCase__ , UpperCamelCase__ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F'''Transfered from={src_m} to={dest_m}''' )
class _lowercase ( nn.Module ):
    """Wraps a classy-vision RegNet so its forward returns per-stage feature maps
    (stem + res stages) the way the vissl trunk helper expects."""

    def __init__( self: Optional[Any] , UpperCamelCase__: nn.Module ):
        super().__init__()
        # Collect (name, module) pairs: the stem as "conv1", then each trunk
        # block as "res{index}".
        # NOTE(review): `feature_blocks`, `model` and `block_index` are never
        # bound in this scope (assignments go to a throwaway local) — renaming
        # damage; would raise NameError as written.
        lowerCamelCase__ : List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("""conv1""", model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("""block""" ), F'''Unexpected layer name {k}'''
            lowerCamelCase__ : Union[str, Any] = len(UpperCamelCase__ ) + 1
            feature_blocks.append((F'''res{block_index}''', v) )
        lowerCamelCase__ : str = nn.ModuleDict(UpperCamelCase__ )

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Tensor ):
        # Delegate to vissl's helper to run the trunk and collect stage outputs.
        return get_trunk_forward_outputs(
            UpperCamelCase__ , out_feat_keys=UpperCamelCase__ , feature_blocks=self._feature_blocks , )
class _lowercase ( _lowercase ):
    """Dict of model name -> factory; unknown names fall back to a timm model.

    Fix: ``x_split`` and ``val`` were never bound (assignments went to a
    throwaway local) and ``__getitem__`` called ``self.convert_name_to_timm``
    while the method had been renamed — the method's real name is restored so
    the internal call resolves.
    """

    def convert_name_to_timm( self: Any , x: str ):
        """Translate a dashed name (e.g. "regnet-y-032") into timm naming ("regnety_032")."""
        x_split = x.split("""-""")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__( self: Tuple , x: str ):
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            # Factory returns (model, state_dict); timm models carry their own
            # weights, hence the None state dict.
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class _lowercase ( _lowercase ):
    """Dict of model name -> HF model class; seer checkpoints without an in1k
    head map to the bare backbone, everything else to the classifier.

    Fix: ``val`` was never bound (assignments went to a throwaway local), so
    the ``return val`` raised NameError.
    """

    def __getitem__( self: int , x: str ):
        if "seer" in x and "in1k" not in x:
            # Self-supervised seer checkpoints have no classification head.
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys):
    """Copy selected tensors from a vissl head state dict into ``to_state_dict``.

    Fix: all three parameters shared one name (a SyntaxError), the copied
    tensor was bound to a throwaway local instead of written into
    ``to_state_dict``, and the definition is renamed to match its call site
    (three functions in this file previously shared one name, shadowing each
    other).

    :param from_state_dict: source state dict (vissl naming).
    :param to_state_dict: destination state dict, mutated in place.
    :param keys: iterable of (from_key, to_key) pairs to copy.
    :return: ``to_state_dict`` (same object, for convenience).
    """
    for from_key, to_key in keys:
        # clone() detaches the copied tensor from the source storage.
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'''Copied key={from_key} to={to_key}''')
    return to_state_dict
def convert_weight_and_push(
    name,
    from_model_func,
    our_model_func,
    config,
    save_directory,
    push_to_hub=True,
):
    """Convert one checkpoint: trace-transfer weights from the source model into
    the HF model, verify outputs match, and optionally push model + processor.

    Fix: all six parameters shared one name (a SyntaxError), every local
    (``from_model``, ``our_model``, ``x``, ``keys`` …) was bound to a throwaway
    name, and the definition is renamed to match its call sites below.
    """
    print(f'''Converting {name}...''')
    with torch.no_grad():
        # Factory returns (source model, optional vissl head state dict).
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("""0.clf.0.weight""", """classifier.1.weight"""), ("""0.clf.0.bias""", """classifier.1.bias""")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    # Classification heads expose logits; bare backbones the last hidden state.
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="""Add model""", use_temp_dir=True,
        )
        size = 224 if """seer""" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="""Add image processor""", use_temp_dir=True,
        )
        print(f'''Pushed {name}''')
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """Build configs for every supported RegNet variant, register the source
    checkpoints, and convert either one model or all of them.

    Fix: the parameters shared one name (a SyntaxError), every local was bound
    to a throwaway name while later lines read the real names (``id2label``,
    ``names_to_config`` …), the seer factories were never stored into the map,
    and the 10B ``RegNetParams`` call passed ``w_a`` twice (the second keyword
    is ``w_0``). The definition is renamed to match its call site in the main
    guard.
    """
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="""dataset""")), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Config factory pre-loaded with the ImageNet-1k label mapping.
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        """regnet-x-002""": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="""x"""),
        """regnet-x-004""": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="""x"""),
        """regnet-x-006""": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="""x"""),
        """regnet-x-008""": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="""x"""),
        """regnet-x-016""": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="""x"""),
        """regnet-x-032""": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="""x"""),
        """regnet-x-040""": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="""x"""),
        """regnet-x-064""": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="""x"""),
        """regnet-x-080""": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="""x"""),
        """regnet-x-120""": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="""x"""),
        """regnet-x-160""": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="""x"""),
        """regnet-x-320""": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="""x"""),
        # y variant
        """regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        """regnet-y-004""": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8),
        """regnet-y-006""": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16),
        """regnet-y-008""": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16),
        """regnet-y-016""": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24),
        """regnet-y-032""": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24),
        """regnet-y-040""": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64),
        """regnet-y-064""": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72),
        """regnet-y-080""": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56),
        """regnet-y-120""": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112),
        """regnet-y-160""": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112),
        """regnet-y-320""": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        """regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        """regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        """regnet-y-1280-seer""": RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264),
        """regnet-y-2560-seer""": RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640),
        """regnet-y-10b-seer""": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010),
        # finetuned on imagenet
        """regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        """regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        """regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264),
        """regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640),
        """regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010),
    }

    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url, model_func) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="""cpu""")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["""classy_state_dict"""]["""base_model"""]["""model"""]
        state_dict = model_state_dict["""trunk"""]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["""heads"""]

    # NOTE(review): the classy-vision model class names below come from a
    # mangled import line (two of them are identical) — verify against upstream
    # (RegNetY32gf / RegNetY64gf / RegNetY128gf).
    # pretrained
    names_to_from_model_map["""regnet-y-320-seer"""] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaagf()), )
    names_to_from_model_map["""regnet-y-640-seer"""] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaagf()), )
    names_to_from_model_map["""regnet-y-1280-seer"""] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaaagf()), )
    names_to_from_model_map["""regnet-y-10b-seer"""] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))), )
    # IN1K finetuned
    names_to_from_model_map["""regnet-y-320-seer-in1k"""] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaagf()), )
    names_to_from_model_map["""regnet-y-640-seer-in1k"""] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaagf()), )
    names_to_from_model_map["""regnet-y-1280-seer-in1k"""] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""", lambda: FakeRegNetVisslWrapper(RegNetYaaagf()), )
    names_to_from_model_map["""regnet-y-10b-seer-in1k"""] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_0=620.83, w_m=2.52))), )

    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
    return config, expected_shape
if __name__ == "__main__":
    # Fix: `parser`, `args` and `pytorch_dump_folder_path` were assigned to a
    # throwaway name while the following lines read the real names (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help=(
            '''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
            ''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=Path,
        required=True,
        help='''Path to the output PyTorch model directory.''',
    )
    # NOTE(review): argparse's `type=bool` makes any non-empty string truthy
    # (e.g. --push_to_hub False is still True); kept as-is to preserve the CLI.
    parser.add_argument(
        '''--push_to_hub''',
        default=True,
        type=bool,
        required=False,
        help='''If True, push model and image processor to the hub.''',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-module bootstrap for TrOCR.
# Fix: the import structure was assigned to a dead name while `_LazyModule`
# read an undefined `_import_structure`; the optional modeling entries were
# never added to the structure; and the `_LazyModule` instance was never
# installed into `sys.modules`.
_import_structure = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: modeling symbols are simply not exported.
    pass
else:
    _import_structure['''modeling_trocr'''] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # At runtime the module is replaced by a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowercase ( unittest.TestCase ):
    """Tests for DisjunctiveConstraint.

    Fix: all four test methods shared one non-``test_``-prefixed name, so they
    shadowed each other and unittest never discovered any of them; the tuples
    returned by ``dc.update()`` were never unpacked, so ``stepped`` /
    ``completed`` / ``reset`` were NameErrors.
    """

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 702 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Union[str, Any] =logging.get_logger(__name__)
_A : List[str] ={
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class _lowercase ( _lowercase ):
    """Configuration for the Audio Spectrogram Transformer.

    Fix: every ``__init__`` parameter shared one repeated name (a SyntaxError)
    and the values were bound to a throwaway local instead of being stored on
    ``self``; parameter names are restored to match standard config attribute
    names, in the original default order.
    """

    a = """audio-spectrogram-transformer"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 631 | 0 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_A : Union[str, Any] =datasets.utils.logging.get_logger(__name__)
@dataclass
class _lowercase ( datasets.BuilderConfig ):
    """BuilderConfig for JSON.

    Fix: all eight fields had been collapsed to one un-annotated name ``a``,
    so none was a real dataclass field. Names are restored from the
    ``self.config.*`` reads in the builder below, matched by default values
    and the original inline comments.
    """

    features: Optional[datasets.Features] = None
    encoding: str = """utf-8"""
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class _lowercase ( datasets.ArrowBasedBuilder ):
    """JSON / JSON-Lines dataset builder.

    NOTE(review): throughout this class, values are assigned to the mangled
    local name `lowerCamelCase__` but later read under their intended names
    (`data_files`, `files`, `splits`, `dataset`, `keys`, `batch`, `block_size`,
    `batch_idx`, `pa_table`, …); as written these methods would raise
    NameError at runtime. Comments describe the evident intent.
    """
    # Config class for this builder; `JsonConfig` is presumably the dataclass
    # defined above (there named `_lowercase`) — TODO confirm.
    a = JsonConfig
    def lowerCamelCase_ ( self: str ):
        # _info: warn about deprecated config options, then describe the dataset.
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            # Deprecated `block_size` is carried over as the chunk size.
            lowerCamelCase__ : str = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
        if self.config.newlines_in_values is not None:
            raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
        return datasets.DatasetInfo(features=self.config.features )
    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ):
        # _split_generators: download/extract `config.data_files` and emit one
        # SplitGenerator per split (a bare path/list becomes a single TRAIN split).
        # NOTE(review): the download manager is read as `dl_manager` although the
        # parameter is named `UpperCamelCase__` — another mangling symptom.
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        lowerCamelCase__ : Tuple = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(UpperCamelCase__ , (str, list, tuple) ):
            lowerCamelCase__ : Optional[Any] = data_files
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                lowerCamelCase__ : List[str] = [files]
            lowerCamelCase__ : str = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        lowerCamelCase__ : Union[str, Any] = []
        for split_name, files in data_files.items():
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                lowerCamelCase__ : List[Any] = [files]
            lowerCamelCase__ : Dict = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
            splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={"""files""": files} ) )
        return splits
    def lowerCamelCase_ ( self: str , UpperCamelCase__: pa.Table ):
        # _cast_table: add requested columns missing from the table as all-null
        # arrays, then cast the table to the feature schema.
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                lowerCamelCase__ : Tuple = self.config.features.arrow_schema.field(UpperCamelCase__ ).type
                lowerCamelCase__ : Any = pa_table.append_column(UpperCamelCase__ , pa.array([None] * len(UpperCamelCase__ ) , type=UpperCamelCase__ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            lowerCamelCase__ : Optional[Any] = table_cast(UpperCamelCase__ , self.config.features.arrow_schema )
        return pa_table
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[int] ):
        # _generate_tables: yield (key, pa.Table) pairs for each input file,
        # either from one field of a whole-file JSON object or from JSON-Lines
        # chunks parsed with pyarrow.json.
        for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(UpperCamelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    lowerCamelCase__ : Union[str, Any] = json.load(UpperCamelCase__ )
                # We keep only the field we are interested in
                lowerCamelCase__ : Optional[int] = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(UpperCamelCase__ , (list, tuple) ):
                    lowerCamelCase__ : Any = set().union(*[row.keys() for row in dataset] )
                    lowerCamelCase__ : Tuple = {col: [row.get(UpperCamelCase__ ) for row in dataset] for col in keys}
                else:
                    lowerCamelCase__ : Optional[Any] = dataset
                lowerCamelCase__ : Dict = pa.Table.from_pydict(UpperCamelCase__ )
                yield file_idx, self._cast_table(UpperCamelCase__ )
            # If the file has one json object per line
            else:
                with open(UpperCamelCase__ , """rb""" ) as f:
                    lowerCamelCase__ : Dict = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    lowerCamelCase__ : Tuple = max(self.config.chunksize // 32 , 16 << 10 )
                    lowerCamelCase__ : List[str] = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        lowerCamelCase__ : List[str] = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(UpperCamelCase__ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            lowerCamelCase__ : Union[str, Any] = batch.decode(self.config.encoding , errors=UpperCamelCase__ ).encode("""utf-8""" )
                        try:
                            while True:
                                try:
                                    lowerCamelCase__ : List[str] = paj.read_json(
                                        io.BytesIO(UpperCamelCase__ ) , read_options=paj.ReadOptions(block_size=UpperCamelCase__ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    # Only retry "straddling object" errors while the block
                                    # size is still smaller than the batch; anything else is fatal.
                                    if (
                                        isinstance(UpperCamelCase__ , pa.ArrowInvalid )
                                        and "straddling" not in str(UpperCamelCase__ )
                                        or block_size > len(UpperCamelCase__ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F'''Batch of {len(UpperCamelCase__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Fall back to parsing the whole file as one JSON document.
                            try:
                                with open(
                                    UpperCamelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    lowerCamelCase__ : Tuple = json.load(UpperCamelCase__ )
                            except json.JSONDecodeError:
                                logger.error(F'''Failed to read file \'{file}\' with error {type(UpperCamelCase__ )}: {e}''' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # list is the only sequence type supported in JSON
                                try:
                                    lowerCamelCase__ : Tuple = set().union(*[row.keys() for row in dataset] )
                                    lowerCamelCase__ : Any = {col: [row.get(UpperCamelCase__ ) for row in dataset] for col in keys}
                                    lowerCamelCase__ : List[Any] = pa.Table.from_pydict(UpperCamelCase__ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F'''Failed to read file \'{file}\' with error {type(UpperCamelCase__ )}: {e}''' )
                                    raise ValueError(F'''Not able to read records in the JSON file at {file}.''' ) from None
                                yield file_idx, self._cast_table(UpperCamelCase__ )
                                break
                            else:
                                logger.error(F'''Failed to read file \'{file}\' with error {type(UpperCamelCase__ )}: {e}''' )
                                raise ValueError(
                                    F'''Not able to read records in the JSON file at {file}. '''
                                    F'''You should probably indicate the field of the JSON file containing your records. '''
                                    F'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
                                    F'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(UpperCamelCase__ )
                        batch_idx += 1
| 703 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Root folder of the example scripts whose pinned minimum version is bumped.
PATH_TO_EXAMPLES = "examples/"
# Maps a short pattern name to (compiled regex, replacement template);
# "VERSION" in the template is substituted with the release version at call time.
# Names restored: the functions below read REPLACE_PATTERNS / REPLACE_FILES /
# README_FILE, but the originals were all bound to the mangled name `_A`.
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# Files whose version string must be rewritten on every release.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern) -> None:
    """Rewrite the version string inside `fname` in place.

    Args:
        fname: path of the file to edit.
        version: new version string, substituted for "VERSION" in the template.
        pattern: key into REPLACE_PATTERNS selecting (regex, replacement template).

    Name/parameters restored to match the call sites below (which pass
    `pattern=` by keyword); the original had three identically-named
    parameters (a SyntaxError) and mangled locals.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version) -> None:
    """Update the `check_min_version` pin in every maintained example script.

    Walks PATH_TO_EXAMPLES (not the version argument, as the mangled original
    did) and rewrites each `.py` file via `update_version_in_file`.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False) -> None:
    """Propagate `version` to every tracked file; examples are skipped for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list() -> None:
    """In the README model list, point doc links at the stable docs instead of `main`.

    Scans README_FILE between the architectures header and the "Want to
    contribute" line, rewriting `/main/model_doc` links on list items.
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> "packaging.version.Version":
    """Read the current package version out of the main `__init__.py`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False) -> None:
    """Interactively bump the version for a release.

    Dev versions drop their `.dev0` suffix; otherwise the minor version is
    bumped (or the micro version for a patch release). The user can override
    the suggested version at the prompt.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work() -> None:
    """Interactively move the version to the next `.dev0` after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI entry point: choose pre- or post-release bookkeeping.
    # (`parser`/`args` were originally bound to the mangled name `_A`.)
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 631 | 0 |
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# Module logger (read as `logger` by the formatter below; the original bound it to `_A`).
logger = get_logger()
# Lazily-built global {str identifier: jax Device} map. Kept global because
# `jaxlib.xla_extension.Device` objects are not picklable (see comments below).
DEVICE_MAPPING: Optional[dict] = None
class _lowercase ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
    """Formatter that converts Arrow data into `jax.numpy` arrays on a chosen device.

    NOTE(review): locals here are machine-mangled — values are assigned to
    `lowerCamelCase__` but read under intended names (`default_dtype`,
    `column`, `batch`, and even `self.device`, which is never actually set);
    as written these methods would fail at runtime. Comments describe intent.
    """
    def __init__( self: List[Any] , UpperCamelCase__: List[Any]=None , UpperCamelCase__: Union[str, Any]=None , **UpperCamelCase__: Optional[int] ):
        # Resolve the target device from its string identifier; Device objects
        # themselves are rejected because they cannot be pickled.
        super().__init__(features=UpperCamelCase__ )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            raise ValueError(
                F'''Expected {device} to be a `str` not {type(UpperCamelCase__ )}, as `jaxlib.xla_extension.Device` '''
                """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
                """the device with `str()` to get its string identifier that will be internally mapped """
                """to the actual `jaxlib.xla_extension.Device`.""" )
        lowerCamelCase__ : List[str] = device if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            lowerCamelCase__ : str = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            # Unknown identifier: fall back to the default jax device.
            logger.warning(
                F'''Device with string identifier {self.device} not listed among the available '''
                F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
                F'''device: {str(jax.devices()[0] )}.''' )
            lowerCamelCase__ : str = str(jax.devices()[0] )
        lowerCamelCase__ : List[Any] = jnp_array_kwargs
    @staticmethod
    def lowerCamelCase_ ( ):
        # _map_devices_to_str: build {str identifier: Device} for all jax devices.
        import jax
        return {str(UpperCamelCase__ ): device for device in jax.devices()}
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[Any] ):
        # _consolidate: stack a list of same-shape/dtype jax arrays into one array.
        import jax
        import jax.numpy as jnp
        if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and column:
            if all(
                isinstance(UpperCamelCase__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(UpperCamelCase__ , axis=0 )
        return column
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[Any] ):
        # _tensorize: convert one leaf value to a jnp array with a sensible dtype.
        import jax
        import jax.numpy as jnp
        if isinstance(UpperCamelCase__ , (str, bytes, type(UpperCamelCase__ )) ):
            return value
        elif isinstance(UpperCamelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        lowerCamelCase__ : int = {}
        if isinstance(UpperCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_xaa:
                lowerCamelCase__ : Union[str, Any] = {"""dtype""": jnp.intaa}
            else:
                lowerCamelCase__ : Any = {"""dtype""": jnp.intaa}
        elif isinstance(UpperCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            lowerCamelCase__ : Any = {"""dtype""": jnp.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(UpperCamelCase__ , PIL.Image.Image ):
                lowerCamelCase__ : List[str] = np.asarray(UpperCamelCase__ )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            lowerCamelCase__ : Optional[int] = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(UpperCamelCase__ , **{**default_dtype, **self.jnp_array_kwargs} )
    def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Optional[Any] ):
        # _recursive_tensorize: walk nested lists/arrays, tensorizing each leaf.
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(UpperCamelCase__ , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(UpperCamelCase__ , """__array__""" ) and not isinstance(UpperCamelCase__ , jax.Array ):
            lowerCamelCase__ : List[Any] = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(UpperCamelCase__ , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(UpperCamelCase__ ) for substruct in data_struct] )
        elif isinstance(UpperCamelCase__ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(UpperCamelCase__ ) for substruct in data_struct] )
        return self._tensorize(UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any , UpperCamelCase__: dict ):
        # recursive_tensorize: map the recursive conversion over a nested mapping.
        return map_nested(self._recursive_tensorize , UpperCamelCase__ , map_list=UpperCamelCase__ )
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: pa.Table ):
        # format_row: extract + decode one row, then tensorize it.
        lowerCamelCase__ : str = self.numpy_arrow_extractor().extract_row(UpperCamelCase__ )
        lowerCamelCase__ : List[str] = self.python_features_decoder.decode_row(UpperCamelCase__ )
        return self.recursive_tensorize(UpperCamelCase__ )
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: pa.Table ):
        # format_column: extract + decode the first column, tensorize, stack.
        lowerCamelCase__ : List[Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase__ , pa_table.column_names[0] )
        lowerCamelCase__ : int = self.recursive_tensorize(UpperCamelCase__ )
        lowerCamelCase__ : Any = self._consolidate(UpperCamelCase__ )
        return column
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: pa.Table ):
        # format_batch: extract + decode a whole batch, tensorize, stack per column.
        lowerCamelCase__ : Any = self.numpy_arrow_extractor().extract_batch(UpperCamelCase__ )
        lowerCamelCase__ : int = self.python_features_decoder.decode_batch(UpperCamelCase__ )
        lowerCamelCase__ : int = self.recursive_tensorize(UpperCamelCase__ )
        for column_name in batch:
            lowerCamelCase__ : Tuple = self._consolidate(batch[column_name] )
        return batch
| 704 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
    """Checks that one epoch of training with a DDPM scheduler and with a DDIM
    scheduler yields matching results given identical seeds, models and noise.

    NOTE(review): locals are machine-mangled — values are assigned to
    `lowerCamelCase__` but read as `model`, `optimizer`, `loss`, etc.; as
    written these methods would raise NameError. Comments describe intent.
    """
    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str=32 ):
        # get_model_optimizer: small UNet + SGD, built under a fixed seed so
        # both scheduler runs start from identical weights.
        set_seed(0 )
        lowerCamelCase__ : Optional[int] = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
        lowerCamelCase__ : List[Any] = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # Full-determinism training-equivalence test (CPU only).
        lowerCamelCase__ : Optional[Any] = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        lowerCamelCase__ : List[Any] = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        lowerCamelCase__ : Any = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        lowerCamelCase__ : str = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )]
        lowerCamelCase__ : Tuple = [torch.randint(0 , 1_000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )]
        # train with a DDPM scheduler
        lowerCamelCase__ , lowerCamelCase__ : Any = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : str = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : str = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Tuple = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.get_model_optimizer(resolution=32 )
        model.train().to(UpperCamelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            lowerCamelCase__ : Optional[Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            lowerCamelCase__ : Dict = model(UpperCamelCase__ , timesteps[i] ).sample
            lowerCamelCase__ : Union[str, Any] = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # Both runs must produce (near-)identical losses/samples.
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
| 631 | 0 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> tuple[float | int, list[tuple[int, int]]]:
lowerCamelCase__ : List[str] = grid.shape
lowerCamelCase__ : str = [-1, 1, 0, 0]
lowerCamelCase__ : Optional[int] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowerCamelCase__ : Optional[Any] = [(0, source)], set()
lowerCamelCase__ : Optional[Any] = np.full((rows, cols) , np.inf )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Optional[Any] = np.empty((rows, cols) , dtype=UpperCamelCase )
lowerCamelCase__ : str = None
while queue:
(lowerCamelCase__) : Any = heappop(UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowerCamelCase__ : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowerCamelCase__ : Tuple = predecessors[x, y]
path.append(UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCamelCase ) ):
lowerCamelCase__ : int = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowerCamelCase__ : Union[str, Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCamelCase , (dist + 1, (nx, ny)) )
lowerCamelCase__ : Dict = dist + 1
lowerCamelCase__ : List[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 705 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Per-process turnaround times under Highest-Response-Ratio-Next scheduling.

    Processes are reordered by arrival time; each scheduling step picks the
    ready process with the highest response ratio (waiting + burst) / burst.
    Note: `arrival_time` is sorted in place, so the caller's list is mutated.

    Name and locals restored — the original had a mangled def name that did not
    match the call site below, and all locals collapsed to `lowerCamelCase__`.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # Skip to the first process that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        # If the CPU is idle, fast-forward to the next arrival.
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Per-process waiting time: turnaround time minus burst time.

    Name and locals restored to match the call site below (the original def
    name was mangled and its locals all collapsed to one identifier).
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Demo: five processes, arrival/burst 1..5; print a per-process table plus
    # averages. (The originals were all bound to `_A`, leaving the names the
    # later statements read — `no_of_process`, `process_name`, … — undefined.)
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
    for i in range(0, no_of_process):
        print(
            F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
            F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
        )
    print(F'average waiting time : {mean(waiting_time):.5f}')
    print(F'average turn around time : {mean(turn_around_time):.5f}')
| 631 | 0 |
'''simple docstring'''
def hamming(n_element: int) -> list:
    """Return the first `n_element` Hamming numbers (2^i * 3^j * 5^k), ascending.

    Raises:
        ValueError: if `n_element` is less than 1.

    Name and locals restored to match the call site below; the classic
    three-pointer construction advances the next multiple of 2, 3 and 5.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error
    hamming_list = [1]
    # i/j/k index the smallest element whose 2x/3x/5x exceeds the current tail.
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Demo: prompt for n and print the first n Hamming numbers.
    # (`n` and `hamming_numbers` were originally bound to the mangled `_A`.)
    n = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
    print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
    hamming_numbers = hamming(int(n))
    print('''-----------------------------------------------------''')
    print(F'The list with nth numbers is: {hamming_numbers}')
    print('''-----------------------------------------------------''')
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
# Expose the VQ-Diffusion pipeline only when both `transformers` and `torch` are installed.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 631 | 0 |
'''simple docstring'''
# Pinned dependency table: package name -> pip requirement specifier.
# NOTE(review): bound to the mangled name `_A`; nothing in this chunk reads it,
# so the intended public name cannot be confirmed from here.
_A : Optional[int] ={
    '''Pillow''': '''Pillow''',
    '''accelerate''': '''accelerate>=0.11.0''',
    '''compel''': '''compel==0.1.8''',
    '''black''': '''black~=23.1''',
    '''datasets''': '''datasets''',
    '''filelock''': '''filelock''',
    '''flax''': '''flax>=0.4.1''',
    '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
    '''huggingface-hub''': '''huggingface-hub>=0.13.2''',
    '''requests-mock''': '''requests-mock==1.10.0''',
    '''importlib_metadata''': '''importlib_metadata''',
    '''invisible-watermark''': '''invisible-watermark''',
    '''isort''': '''isort>=5.5.4''',
    '''jax''': '''jax>=0.2.8,!=0.3.2''',
    '''jaxlib''': '''jaxlib>=0.1.65''',
    '''Jinja2''': '''Jinja2''',
    '''k-diffusion''': '''k-diffusion>=0.0.12''',
    '''torchsde''': '''torchsde''',
    '''note_seq''': '''note_seq''',
    '''librosa''': '''librosa''',
    '''numpy''': '''numpy''',
    '''omegaconf''': '''omegaconf''',
    '''parameterized''': '''parameterized''',
    '''protobuf''': '''protobuf>=3.20.3,<4''',
    '''pytest''': '''pytest''',
    '''pytest-timeout''': '''pytest-timeout''',
    '''pytest-xdist''': '''pytest-xdist''',
    '''ruff''': '''ruff>=0.0.241''',
    '''safetensors''': '''safetensors''',
    '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
    '''scipy''': '''scipy''',
    '''onnx''': '''onnx''',
    '''regex''': '''regex!=2019.12.17''',
    '''requests''': '''requests''',
    '''tensorboard''': '''tensorboard''',
    '''torch''': '''torch>=1.4''',
    '''torchvision''': '''torchvision''',
    '''transformers''': '''transformers>=4.25.1''',
    '''urllib3''': '''urllib3<=2.0.0''',
}
| 707 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Config/input factory and shape checks for ViTMAE model tests.

    NOTE(review): the constructor's parameters are all named `UpperCamelCase__`
    (duplicate argument names are a SyntaxError) and locals are assigned to
    `lowerCamelCase__` but read under intended names (`parent`, `batch_size`,
    `model`, `result`, …); as written this class cannot run. The assignments
    below evidently mirror a one-parameter-per-attribute constructor.
    """
    def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ):
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : Optional[Any] = patch_size
        lowerCamelCase__ : Any = num_channels
        lowerCamelCase__ : Any = is_training
        lowerCamelCase__ : Union[str, Any] = use_labels
        lowerCamelCase__ : List[str] = hidden_size
        lowerCamelCase__ : List[str] = num_hidden_layers
        lowerCamelCase__ : List[Any] = num_attention_heads
        lowerCamelCase__ : str = intermediate_size
        lowerCamelCase__ : str = hidden_act
        lowerCamelCase__ : Any = hidden_dropout_prob
        lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase__ : List[Any] = type_sequence_label_size
        lowerCamelCase__ : int = initializer_range
        lowerCamelCase__ : List[str] = mask_ratio
        lowerCamelCase__ : List[Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        lowerCamelCase__ : str = (image_size // patch_size) ** 2
        lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def lowerCamelCase_ ( self: Optional[int] ):
        # prepare_config_and_inputs: random pixel values (+ labels if enabled).
        lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase__ : Any = self.get_config()
        return config, pixel_values, labels
    def lowerCamelCase_ ( self: str ):
        # get_config: build a ViTMAEConfig from the tester's attributes.
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int ):
        # create_and_check_model: forward pass, check last_hidden_state shape.
        lowerCamelCase__ : Tuple = ViTMAEModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ):
        # create_and_check_for_pretraining: check reconstruction-logit shapes,
        # for both RGB and greyscale inputs.
        lowerCamelCase__ : int = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        lowerCamelCase__ : Any = (self.image_size // self.patch_size) ** 2
        lowerCamelCase__ : str = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        lowerCamelCase__ : Dict = 1
        lowerCamelCase__ : Optional[int] = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def lowerCamelCase_ ( self: Any ):
        # prepare_config_and_inputs_for_common: pack inputs into a kwargs dict.
        lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs
        lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
a = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Tuple = ViTMAEModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Dict ):
pass
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = model_class(UpperCamelCase__ )
lowerCamelCase__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Any = [*signature.parameters.keys()]
lowerCamelCase__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ : Tuple = pt_noise
super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy()
lowerCamelCase__ : List[str] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ )
model.to(UpperCamelCase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
# Make sure we don't have nans
lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self: Any ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase_ ( self: Tuple ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@slow
def lowerCamelCase_ ( self: List[str] ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
lowerCamelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: List[str] ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: Tuple ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCamelCase__ : str = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(UpperCamelCase__ )
lowerCamelCase__ : Tuple = self.default_image_processor
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : int = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase__ : List[str] = ViTMAEConfig()
lowerCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase__ : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , noise=torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ ) )
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : str = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase__ ) , atol=1e-4 ) )
| 631 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=0.999 , UpperCamelCase="cosine" , ) -> Tuple:
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCamelCase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
lowerCamelCase__ : Optional[Any] = []
for i in range(UpperCamelCase ):
lowerCamelCase__ : Any = i / num_diffusion_timesteps
lowerCamelCase__ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCamelCase ) / alpha_bar_fn(UpperCamelCase ) , UpperCamelCase ) )
return torch.tensor(UpperCamelCase , dtype=torch.floataa )
class _lowercase ( _lowercase , _lowercase ):
    """KDPM2-style discrete diffusion scheduler (ConfigMixin + SchedulerMixin bases).

    NOTE(review): identifiers were machine-mangled. All methods are named
    ``lowerCamelCase_``, several parameters in one signature share the name
    ``UpperCamelCase__`` (which is a SyntaxError), and many assignments target a
    throwaway ``lowerCamelCase__`` while later lines read the original variable
    names (``timestep``, ``sigmas``, ...). Code is left byte-identical here;
    only comments/docstrings were added. The mangled names must be restored
    before this class can run.
    """

    # Mangled class attributes — presumably `_compatibles` and `order` (order=2
    # matches the two-stage first/second-order stepping below) — TODO confirm.
    a = [e.name for e in KarrasDiffusionSchedulers]
    a = 2

    @register_to_config
    def __init__( self: Tuple , UpperCamelCase__: int = 1_000 , UpperCamelCase__: float = 0.00_085 , UpperCamelCase__: float = 0.012 , UpperCamelCase__: str = "linear" , UpperCamelCase__: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase__: str = "epsilon" , UpperCamelCase__: str = "linspace" , UpperCamelCase__: int = 0 , ):
        """Build the beta/alpha schedule and initialize timesteps.

        Bodies reference `trained_betas`, `beta_schedule`, `beta_start`,
        `beta_end` — the intended parameter names lost to mangling.
        """
        if trained_betas is not None:
            lowerCamelCase__ : List[Any] = torch.tensor(UpperCamelCase__ , dtype=torch.floataa )
        elif beta_schedule == "linear":
            lowerCamelCase__ : Dict = torch.linspace(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            lowerCamelCase__ : int = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase__ , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            lowerCamelCase__ : Any = betas_for_alpha_bar(UpperCamelCase__ )
        else:
            raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
        lowerCamelCase__ : Union[str, Any] = 1.0 - self.betas
        lowerCamelCase__ : Optional[Any] = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str , UpperCamelCase__: Any=None ):
        """Map a timestep to its index in the schedule.

        NOTE(review): other methods call `self.index_for_timestep`; this body
        (reading `schedule_timesteps`, `timestep`) appears to be that method.
        """
        if schedule_timesteps is None:
            lowerCamelCase__ : int = self.timesteps
        lowerCamelCase__ : Optional[Any] = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            lowerCamelCase__ : Any = 1 if len(UpperCamelCase__ ) > 1 else 0
        else:
            lowerCamelCase__ : Optional[Any] = timestep.cpu().item() if torch.is_tensor(UpperCamelCase__ ) else timestep
            lowerCamelCase__ : str = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def lowerCamelCase_ ( self: List[str] ):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: torch.FloatTensor , UpperCamelCase__: Union[float, torch.FloatTensor] , ):
        """Scale the model input sample by 1/sqrt(sigma^2 + 1) for the current step."""
        lowerCamelCase__ : Any = self.index_for_timestep(UpperCamelCase__ )
        if self.state_in_first_order:
            lowerCamelCase__ : Tuple = self.sigmas[step_index]
        else:
            lowerCamelCase__ : str = self.sigmas_interpol[step_index]
        lowerCamelCase__ : Any = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Union[str, torch.device] = None , UpperCamelCase__: Optional[int] = None , ):
        """Precompute sigmas / interpolated sigmas and the interleaved timesteps
        for `num_inference_steps` denoising steps (originally `set_timesteps`)."""
        lowerCamelCase__ : List[Any] = num_inference_steps
        lowerCamelCase__ : List[str] = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            lowerCamelCase__ : Any = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase__ , dtype=UpperCamelCase__ )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            lowerCamelCase__ : Any = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            lowerCamelCase__ : Any = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase__ )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            lowerCamelCase__ : Any = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            lowerCamelCase__ : Any = (np.arange(UpperCamelCase__ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase__ )
            timesteps -= 1
        else:
            raise ValueError(
                F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
        # Karras-style sigmas derived from the cumulative alphas.
        lowerCamelCase__ : Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        lowerCamelCase__ : Union[str, Any] = torch.from_numpy(np.log(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = np.interp(UpperCamelCase__ , np.arange(0 , len(UpperCamelCase__ ) ) , UpperCamelCase__ )
        lowerCamelCase__ : List[str] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        lowerCamelCase__ : Dict = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ )
        # interpolate sigmas
        lowerCamelCase__ : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
        lowerCamelCase__ : List[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        lowerCamelCase__ : Union[str, Any] = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(UpperCamelCase__ ).startswith("""mps""" ):
            # mps does not support float64
            lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ , dtype=torch.floataa )
        else:
            lowerCamelCase__ : int = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
        # interpolate timesteps
        lowerCamelCase__ : Tuple = self.sigma_to_t(UpperCamelCase__ ).to(UpperCamelCase__ , dtype=timesteps.dtype )
        lowerCamelCase__ : Tuple = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
        lowerCamelCase__ : Dict = torch.cat([timesteps[:1], interleaved_timesteps] )
        lowerCamelCase__ : List[str] = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        lowerCamelCase__ : Tuple = defaultdict(UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str ):
        """Invert the sigma schedule: interpolate a (fractional) timestep for `sigma`."""
        # get log sigma
        lowerCamelCase__ : int = sigma.log()
        # get distribution
        lowerCamelCase__ : Dict = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        lowerCamelCase__ : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        lowerCamelCase__ : List[str] = low_idx + 1
        lowerCamelCase__ : Optional[int] = self.log_sigmas[low_idx]
        lowerCamelCase__ : str = self.log_sigmas[high_idx]
        # interpolate sigmas
        lowerCamelCase__ : Optional[int] = (low - log_sigma) / (low - high)
        lowerCamelCase__ : int = w.clamp(0 , 1 )
        # transform interpolation to time range
        lowerCamelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
        lowerCamelCase__ : List[Any] = t.view(sigma.shape )
        return t

    @property
    def lowerCamelCase_ ( self: Any ):
        # True while the first-order half-step has not yet stashed a sample.
        return self.sample is None

    def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase__: Union[float, torch.FloatTensor] , UpperCamelCase__: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase__: bool = True , ):
        """One denoising step (originally `step`): alternates a first-order
        half-step with a DPM-Solver-2 second-order correction."""
        lowerCamelCase__ : int = self.index_for_timestep(UpperCamelCase__ )
        # advance index counter by 1
        lowerCamelCase__ : List[str] = timestep.cpu().item() if torch.is_tensor(UpperCamelCase__ ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            lowerCamelCase__ : Optional[Any] = self.sigmas[step_index]
            lowerCamelCase__ : Tuple = self.sigmas_interpol[step_index + 1]
            lowerCamelCase__ : Optional[Any] = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            lowerCamelCase__ : Optional[int] = self.sigmas[step_index - 1]
            lowerCamelCase__ : Optional[Any] = self.sigmas_interpol[step_index]
            lowerCamelCase__ : List[str] = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        lowerCamelCase__ : Optional[int] = 0
        lowerCamelCase__ : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            lowerCamelCase__ : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
            lowerCamelCase__ : Optional[Any] = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            lowerCamelCase__ : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
            lowerCamelCase__ : Optional[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("""prediction_type not implemented yet: sample""" )
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            lowerCamelCase__ : str = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            lowerCamelCase__ : Union[str, Any] = sigma_interpol - sigma_hat
            # store for 2nd order step
            lowerCamelCase__ : Any = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            lowerCamelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            lowerCamelCase__ : List[str] = sigma_next - sigma_hat
            lowerCamelCase__ : Dict = self.sample
            lowerCamelCase__ : str = None
        lowerCamelCase__ : Any = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=UpperCamelCase__ )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: torch.FloatTensor , UpperCamelCase__: torch.FloatTensor , UpperCamelCase__: torch.FloatTensor , ):
        """Diffuse `original_samples` with `noise` at the given `timesteps`
        (originally `add_noise`)."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        lowerCamelCase__ : Dict = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase__ ):
            # mps does not support float64
            lowerCamelCase__ : Optional[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
            lowerCamelCase__ : Optional[Any] = timesteps.to(original_samples.device , dtype=torch.floataa )
        else:
            lowerCamelCase__ : List[str] = self.timesteps.to(original_samples.device )
            lowerCamelCase__ : List[Any] = timesteps.to(original_samples.device )
        lowerCamelCase__ : Tuple = [self.index_for_timestep(UpperCamelCase__ , UpperCamelCase__ ) for t in timesteps]
        lowerCamelCase__ : Union[str, Any] = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            lowerCamelCase__ : List[Any] = sigma.unsqueeze(-1 )
        lowerCamelCase__ : List[Any] = original_samples + noise * sigma
        return noisy_samples

    def __len__( self: Tuple ):
        return self.config.num_train_timesteps
| 708 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowercase ( _lowercase ):
    """fsspec filesystem exposing the single file inside a compressed archive
    (base: AbstractArchiveFileSystem, per the module imports).

    NOTE(review): names were machine-mangled — the class attributes below were
    presumably `root_marker`/`protocol`/`compression`/`extension` (the inline
    comments describe them), and assignments to `lowerCamelCase__` originally
    targeted attributes such as `self.file`, `self.compressed_name`,
    `self.uncompressed_name`, `self.dir_cache` (later lines read those names).
    Code left byte-identical; comments only.
    """
    a = """"""
    a = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    a = None # compression type in fsspec. ex: "gzip"
    a = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self: str , UpperCamelCase__: str = "" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , **UpperCamelCase__: List[Any] ):
        """Open the compressed file lazily; derive compressed/uncompressed names."""
        super().__init__(self , **UpperCamelCase__ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        lowerCamelCase__ : List[Any] = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        lowerCamelCase__ : str = os.path.basename(self.file.path.split("""::""" )[0] )
        lowerCamelCase__ : Union[str, Any] = (
            self.compressed_name[: self.compressed_name.rindex(""".""" )]
            if """.""" in self.compressed_name
            else self.compressed_name
        )
        lowerCamelCase__ : Tuple = None

    @classmethod
    def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase__: Optional[int] ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(UpperCamelCase__ ).lstrip("""/""" )

    def lowerCamelCase_ ( self: Tuple ):
        """Populate the one-entry directory cache on first use."""
        if self.dir_cache is None:
            lowerCamelCase__ : Dict = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
            lowerCamelCase__ : int = {f["""name"""]: f}

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ):
        """Return the full decompressed contents as bytes (originally `cat`)."""
        return self.file.open().read()

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Optional[Any] , ):
        """Open the decompressed stream; only binary read mode is supported."""
        lowerCamelCase__ : Union[str, Any] = self._strip_protocol(UpperCamelCase__ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class _lowercase ( _lowercase ):
    """bz2 variant of the compressed-file filesystem.

    The three mangled `a` attributes correspond to the base class's
    protocol / fsspec compression type / filename extension (see the
    base-class attribute comments).
    """
    a = """bz2"""
    a = """bz2"""
    a = """.bz2"""
class _lowercase ( _lowercase ):
    """gzip variant: protocol / compression / extension (mangled attribute names)."""
    a = """gzip"""
    a = """gzip"""
    a = """.gz"""
class _lowercase ( _lowercase ):
    """lz4 variant: protocol / compression / extension (mangled attribute names)."""
    a = """lz4"""
    a = """lz4"""
    a = """.lz4"""
class _lowercase ( _lowercase ):
    """xz variant: protocol / compression / extension (mangled attribute names)."""
    a = """xz"""
    a = """xz"""
    a = """.xz"""
class _lowercase ( _lowercase ):
    """zstd variant: protocol / compression / extension, plus a workaround that
    wraps the decompressor so its read-only `close` attribute doesn't break
    fsspec (details in the inline comment below).

    NOTE(review): mangled names — the nested class was presumably `WrappedFile`
    (referenced by `fixed_enter`), and the `lowerCamelCase__` assignments
    originally bound `_enter`/`self._file`/`self.file.__enter__`.
    Code left byte-identical; comments only.
    """
    a = """zstd"""
    a = """zstd"""
    a = """.zst"""

    def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , UpperCamelCase__: int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__: Dict , ):
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        lowerCamelCase__ : Tuple = self.file.__enter__

        class _lowercase :
            # Delegating wrapper that forwards everything to the wrapped file
            # object while providing a writable `close` (via normal attribute
            # lookup on this wrapper instead of the C-level reader).
            def __init__( self: Optional[int] , UpperCamelCase__: Any ):
                lowerCamelCase__ : Optional[int] = file_

            def __enter__( self: List[Any] ):
                self._file.__enter__()
                return self

            def __exit__( self: Any , *UpperCamelCase__: str , **UpperCamelCase__: Any ):
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )

            def __iter__( self: Any ):
                return iter(self._file )

            def lowerCamelCase_ ( self: List[Any] ):
                return next(self._file )

            def __getattr__( self: List[str] , UpperCamelCase__: Dict ):
                return getattr(self._file , UpperCamelCase__ )

        def fixed_enter(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ):
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) )

        lowerCamelCase__ : Optional[Any] = fixed_enter
| 631 | 0 |
'''simple docstring'''
from math import ceil, sqrt
def SCREAMING_SNAKE_CASE_ (limit = 1000000 ) -> int:
    """Project Euler 173: count hollow square laminae buildable from at most
    `limit` unit tiles.

    A lamina has outer width ``o >= 3`` and hole width ``h`` with the same
    parity, ``1 <= h <= o - 2``, using ``o**2 - h**2`` tiles. For each outer
    width we find the smallest feasible hole width and count the valid holes
    directly.

    Args:
        limit: maximum number of tiles available.

    Returns:
        Number of distinct laminae using at most `limit` tiles.

    Fixes vs. original: the parameter was mangled to a name the body never
    used (`limit` was read but undefined), and `answer` /
    `hole_width_lower_bound` were assigned to throwaway names.
    """
    answer = 0
    # Largest feasible outer width: the thinnest lamina uses 4*o - 4 tiles.
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            # Hole must satisfy o**2 - h**2 <= limit  =>  h >= sqrt(o**2 - limit).
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        # Hole width must share the outer width's parity.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        # Valid hole widths step by 2 from the lower bound up to o - 2.
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
    # Fix: the solver above is named SCREAMING_SNAKE_CASE_ in this file, not
    # `solution` — calling the undefined name raised NameError.
    print(F'{SCREAMING_SNAKE_CASE_() = }')
| 709 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : int =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ (config_path ) -> str:
    """Load a YAML config file and return it as an argparse.Namespace whose
    attribute names are the dot-joined flattened keys.

    Args:
        config_path: path to the YAML configuration file.

    Returns:
        argparse.Namespace with one attribute per flattened key. On a YAML
        parse error the error is logged and an empty Namespace is returned
        (original best-effort behavior preserved).

    Fixes vs. original: the outer function's parameter and the inner
    function's three parameters were all mangled to one shared name
    (a SyntaxError); bodies still referenced `new_key`, `config`, `exc`.
    """
    print("""Loading config file...""" )

    def flatten_yaml_as_dict(d , parent_key="" , sep="." ):
        # Recursively flatten nested mappings into {"a.b.c": value} pairs.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )

    config = argparse.Namespace()
    with open(config_path , """r""" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(config_path , str(exc ) ) )
    return config
def SCREAMING_SNAKE_CASE_ (task_name , orig_cfg_file ) -> Optional[Any]:
    """Build a MobileViTVaConfig for `task_name` from the original YAML config.

    Args:
        task_name: e.g. "imagenet1k_256", "ade20k_deeplabv3", "voc_...".
        orig_cfg_file: path to the original model's YAML config.

    Returns:
        Configured MobileViTVaConfig, including the id/label mappings
        downloaded from the `huggingface/label-files` dataset repo.

    Fixes vs. original: the two parameters shared one mangled name
    (a SyntaxError), and the body read names that were never bound
    (`is_segmentation_model`, `idalabel`). NOTE(review): the config attribute
    targets (num_labels, image_size, width_multiplier, hidden_act,
    output_stride, atrous_rates, aspp_out_channels, aspp_dropout_prob) were
    reconstructed from the upstream MobileViTV2 conversion script — confirm
    against transformers' MobileViTV2Config.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("""imagenet1k_""" ):
        config.num_labels = 1000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        config.num_labels = 21000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        config.num_labels = 151
        config.image_size = 512
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_""" ):
        config.num_labels = 21
        config.image_size = 512
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(orig_config , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , """model.classification.activation.name""" , """swish""" )
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
            config.aspp_dropout_prob = getattr(orig_config , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
    # id2label
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
lowerCamelCase__ : List[Any] = dct.pop(UpperCamelCase )
lowerCamelCase__ : Dict = val
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Tuple:
if base_model:
lowerCamelCase__ : Optional[int] = """"""
else:
lowerCamelCase__ : Optional[Any] = """mobilevitv2."""
lowerCamelCase__ : List[str] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowerCamelCase__ : Optional[Any] = k[8:]
else:
lowerCamelCase__ : Optional[Any] = k
if ".block." in k:
lowerCamelCase__ : Dict = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
lowerCamelCase__ : List[Any] = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
lowerCamelCase__ : str = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
lowerCamelCase__ : Any = k_new.replace("""conv_1.""" , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
lowerCamelCase__ : Dict = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
lowerCamelCase__ : str = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
lowerCamelCase__ : List[str] = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
lowerCamelCase__ : Optional[Any] = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
lowerCamelCase__ : Dict = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
lowerCamelCase__ : int = [0, 1]
elif i == 4:
lowerCamelCase__ : str = [0, 1, 2, 3]
elif i == 5:
lowerCamelCase__ : Dict = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
lowerCamelCase__ : List[Any] = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
lowerCamelCase__ : Optional[int] = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
lowerCamelCase__ : Optional[int] = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
lowerCamelCase__ : str = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
lowerCamelCase__ : Optional[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
lowerCamelCase__ : Union[str, Any] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
lowerCamelCase__ : List[Any] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
lowerCamelCase__ : Tuple = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
lowerCamelCase__ : Optional[int] = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
lowerCamelCase__ : Any = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
lowerCamelCase__ : Any = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(UpperCamelCase )
for k in keys_to_ignore:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def SCREAMING_SNAKE_CASE_ ():
    """Download and return the standard COCO sample image (two cats) used to sanity-check the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    # stream=True so PIL can read straight from the raw response body.
    im = Image.open(requests.get(url , stream=True ).raw )
    return im


# The conversion entry point below refers to this helper by its original name.
prepare_img = SCREAMING_SNAKE_CASE_
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    """Convert an original MobileViTV2 checkpoint to the HF format and save it.

    Args:
        task_name: one of the supported task identifiers (e.g. "imagenet1k_256").
        checkpoint_path: path to the original ``.pt`` state dict.
        orig_config_path: path to the original config file.
        pytorch_dump_folder_path: output directory for model + image processor.
    """
    config = get_mobilevitva_config(task_name , orig_config_path )

    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )

    # load huggingface model
    if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )

    # load modified state_dict
    model.load_state_dict(state_dict )

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )

    # verify classification model
    if task_name.startswith("imagenet" ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        # NOTE(review): the attribute was mangled to ``idalabel`` upstream; the
        # transformers config attribute is ``id2label``.
        print("Predicted class:" , model.config.id2label[predicted_class_idx] )
        if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )


# The CLI block below refers to this entry point by its original name.
convert_mobilevitva_checkpoint = SCREAMING_SNAKE_CASE_
if __name__ == "__main__":
    # Bind the parser/args under the names the statements below actually use
    # (they were previously assigned to a throwaway ``_A``, causing NameErrors).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            """
                Classification (ImageNet-1k)
                    - MobileViTV2 (256x256) : imagenet1k_256
                    - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                    - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
                      imagenet21k_to_1k_256
                    - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                      ImageNet-1k 384x384) : imagenet21k_to_1k_384
                Segmentation
                    - ADE20K Dataset : ade20k_deeplabv3
                    - Pascal VOC 2012 Dataset: voc_deeplabv3
            """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
_A : List[Any] =(3, 9, -11, 0, 7, 5, 1, -1)
_A : int =(4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class _lowercase :
    """A single node of a singly linked list: a payload and the link to the next node."""

    # Fields restored: the list below constructs nodes as Node(data, next_node)
    # and reads .data / .next_node.
    data: int
    next_node: Node | None


# The rest of this module refers to this class as ``Node``; keep that name bound.
Node = _lowercase
class _lowercase :
    """Singly linked list whose elements are kept in ascending sorted order."""

    def __init__( self , UpperCamelCase__: Iterable[int] ):
        self.head: Node | None = None
        # Insert the largest values first so each push-to-front keeps the
        # list sorted ascending.
        for value in sorted(UpperCamelCase__ , reverse=True ):
            self.head = Node(value , self.head )

    def __iter__( self ) -> Iterator[int]:
        # Walk the chain, yielding payloads in ascending order.
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__( self ) -> int:
        return sum(1 for _ in self )

    def __str__( self ) -> str:
        return " -> ".join([str(node ) for node in self] )


# The merge helper and the __main__ demo refer to this class as
# ``SortedLinkedList``; keep that name bound.
SortedLinkedList = _lowercase
def SCREAMING_SNAKE_CASE_ (sll_one , sll_two ) -> SortedLinkedList:
    """Merge two sorted linked lists into a new SortedLinkedList with all elements of both."""
    # Distinct parameter names restored: the original signature repeated one
    # name, which is a SyntaxError in Python.
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )


# The __main__ demo refers to this function by its original name.
merge_lists = SCREAMING_SNAKE_CASE_
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Alias restored: the demo below calls ``SSL(...)``, but the binding had
    # been renamed to a throwaway ``_A``.
    SSL = SortedLinkedList
    # NOTE(review): ``test_data_odd`` / ``test_data_even`` are presumably the
    # two tuples defined at the top of this module — confirm their bindings,
    # as the module currently assigns both to ``_A``.
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowercase ( _lowercase ):
    """Config-tester specialization: checks that the MobileViTV2 config exposes ``width_multiplier``."""

    def lowerCamelCase_ ( self: Any ):
        # Build a config from the tester's stored kwargs, then verify the
        # attribute the MobileViTV2 tests rely on. (Previously ``hasattr`` was
        # called on an undefined name instead of the constructed config.)
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """width_multiplier""" ) )


# The test class below instantiates this as ``MobileViTVaConfigTester``.
MobileViTVaConfigTester = _lowercase
class _lowercase :
    """Test helper that builds a tiny MobileViTV2 config plus random inputs.

    NOTE(review): this block appears machine-mangled — every ``self.<attr> = ...``
    assignment was rewritten into a throwaway local ``lowerCamelCase__`` and the
    ``__init__`` signature repeats a single parameter name (a SyntaxError), so
    the class cannot run as written. Comments below record the apparent intent;
    confirm against upstream ``tests/models/mobilevitv2/test_modeling_mobilevitv2.py``.
    """

    # Apparent parameters (recoverable from the right-hand sides and defaults):
    # parent, batch_size=13, image_size=64, patch_size=2, num_channels=3,
    # hidden_act="swish", conv_kernel_size=3, output_stride=32,
    # classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True,
    # use_labels=True, num_labels=10, scope=None, width_multiplier=0.25,
    # ffn_dropout=0.0, attn_dropout=0.0.
    def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ):
        # NOTE(review): each line below presumably stored its RHS on ``self``
        # (e.g. ``self.parent = parent``); the targets were mangled.
        lowerCamelCase__ : Any = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : str = patch_size
        lowerCamelCase__ : Optional[int] = num_channels
        # Hidden size derives from the width multiplier, rounded to a multiple of 8.
        lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
        lowerCamelCase__ : List[str] = hidden_act
        lowerCamelCase__ : Any = conv_kernel_size
        lowerCamelCase__ : Any = output_stride
        lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
        lowerCamelCase__ : List[str] = use_labels
        lowerCamelCase__ : Optional[Any] = is_training
        lowerCamelCase__ : List[str] = num_labels
        lowerCamelCase__ : Dict = initializer_range
        lowerCamelCase__ : List[Any] = scope
        lowerCamelCase__ : Tuple = width_multiplier
        lowerCamelCase__ : List[Any] = ffn_dropout
        lowerCamelCase__ : Any = attn_dropout

    # (was ``prepare_config_and_inputs``): random pixel values plus optional
    # classification / segmentation labels.
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Tuple = None
        lowerCamelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
            lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        lowerCamelCase__ : Union[str, Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels

    # (was ``get_config``): build the tiny MobileViTV2 config from the stored
    # hyperparameters. NOTE(review): ``ffn_dropout_prob``/``attn_dropout_prob``
    # do not match the constructor's ``ffn_dropout``/``attn_dropout`` — confirm.
    def lowerCamelCase_ ( self: List[Any] ):
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )

    # (was ``create_and_check_model``): run the bare model and check the
    # feature-map shape at the configured output stride.
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
        lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : str = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    # (was ``create_and_check_for_image_classification``).
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
        lowerCamelCase__ : Tuple = self.num_labels
        lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # (was ``create_and_check_for_semantic_segmentation``): checked both with
    # and without labels; the logits keep the output-stride spatial size.
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
        lowerCamelCase__ : List[str] = self.num_labels
        lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    # (was ``prepare_config_and_inputs_for_common``): repackage the inputs as
    # the kwargs dict the common test mixin expects.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs
        lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common-mixin test suite for the MobileViTV2 model family.

    NOTE(review): machine-mangled — the class attributes were all renamed to
    ``a`` (originally ``all_model_classes``, ``pipeline_model_mapping`` and the
    four ``test_*`` flags), the unittest methods lost their ``test_*`` names,
    and several signatures repeat one parameter name (SyntaxError). Comments
    record the apparent intent; restore against the upstream test file.
    """

    a = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    a = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    a = False
    a = False
    a = False
    a = False

    # (was ``setUp``): build the model tester and the config tester used below.
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
        lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )

    # (was ``test_config``).
    def lowerCamelCase_ ( self: Tuple ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: int ):
        pass

    @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass

    @unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def lowerCamelCase_ ( self: int ):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass

    # (was ``test_forward_signature``): every model's forward must take
    # ``pixel_values`` as its first argument.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCamelCase__ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    # (was ``test_model``).
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    # (was ``test_hidden_states_output``): five hidden states, spatial size
    # halving at each stage until the configured output stride is reached.
    def lowerCamelCase_ ( self: List[str] ):
        def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs.hidden_states
            lowerCamelCase__ : List[Any] = 5
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCamelCase__ : int = 2
            for i in range(len(UpperCamelCase__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : str = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    # (was ``test_for_image_classification``).
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )

    # (was ``test_for_semantic_segmentation``).
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )

    # (was ``test_model_from_pretrained``): smoke-load the first published
    # checkpoint.
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ():
    """Load the local COCO fixture image used by the integration tests."""
    # Bind the opened image to the name that is actually returned (the local
    # had been mangled, leaving ``image`` undefined).
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


# The integration tests below refer to this helper by its original name.
prepare_img = SCREAMING_SNAKE_CASE_
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration tests that run real MobileViTV2 checkpoints on a fixture image.

    NOTE(review): machine-mangled — every local was rewritten to
    ``lowerCamelCase__`` while later statements still reference the original
    names (``model``, ``image_processor``, ``outputs`` …), and the unittest
    methods lost their ``test_*`` names. Comments record the apparent intent.
    """

    # (was ``default_image_processor``): lazily build the processor for the
    # 1.0-width ImageNet checkpoint, or None when vision deps are missing.
    @cached_property
    def lowerCamelCase_ ( self: Tuple ):
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
            if is_vision_available()
            else None
        )

    # (was ``test_inference_image_classification_head``): forward the fixture
    # image and compare the first three logits against golden values.
    @slow
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.default_image_processor
        lowerCamelCase__ : List[Any] = prepare_img()
        lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : int = model(**UpperCamelCase__ )
        # verify the logits
        lowerCamelCase__ : str = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )

    # (was ``test_inference_semantic_segmentation``): golden 3x3x3 corner of
    # the 21-class VOC DeepLabV3 logits.
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Union[str, Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
        lowerCamelCase__ : str = outputs.logits
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Any = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ] , device=UpperCamelCase__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )

    # (was ``test_post_processing_semantic_segmentation``): check the resized
    # and native-resolution segmentation map shapes.
    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
        lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
        lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
        lowerCamelCase__ : int = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        lowerCamelCase__ : int = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 631 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class _lowercase ( _lowercase ):
    """Torch Dataset over a directory of CNN/DailyMail story files.

    Each item is ``(document_name, story_lines, summary_lines)``. Files whose
    name contains "summary" are skipped.
    """

    def __init__( self , path="" , prefix="train" ):
        # Distinct parameter names restored (the signature repeated one name,
        # a SyntaxError) and ``self.documents`` is now actually initialized.
        assert os.path.isdir(path )
        self.documents = []
        story_filenames_list = os.listdir(path )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            story_file_path = os.path.join(path , story_filename )
            if not os.path.isfile(story_file_path ):
                continue
            self.documents.append(story_file_path )

    def __len__( self ):
        """Returns the number of documents."""
        return len(self.documents )

    def __getitem__( self , idx ):
        document_path = self.documents[idx]
        document_name = document_path.split("""/""" )[-1]
        with open(document_path , encoding="""utf-8""" ) as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> tuple:
    """Split a raw CNN/DailyMail story into ``(story_lines, summary_lines)``.

    Lines before the first "@highlight" marker are the story; every line after
    a marker is a summary sentence. Lines missing end punctuation get a period.
    """

    def _ensure_period(line ):
        # Some story lines miss their final period; add it. Marker lines are
        # kept verbatim.
        end_tokens = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
        if line.startswith("@highlight" ) or line[-1] in end_tokens:
            return line
        return line + "."

    nonempty_lines = list(filter(lambda x: len(x ) != 0 , [line.strip() for line in UpperCamelCase.split("\n" )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_ensure_period(line ) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight" ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines, dropping the "@highlight" markers themselves
    summary_lines = list(filter(lambda t: not t.startswith("@highlight" ) , lines ) )
    return story_lines, summary_lines


# The Dataset class above refers to this function by its original name.
process_story = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
    """Return *UpperCamelCase* (one story line) with a trailing period added if it lacks end punctuation.

    "@highlight" marker lines and lines already ending in punctuation (period,
    question/exclamation mark, closing quote/parenthesis, …) are returned as-is.
    """
    # NOTE: the second entry was duplicated upstream; the list is meant to hold
    # the right single ("\u2019") and right double ("\u201d") curly quotes.
    end_tokens = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    # Guard the empty line so indexing [-1] below cannot raise.
    if not UpperCamelCase:
        return UpperCamelCase
    if UpperCamelCase.startswith("@highlight" ):
        return UpperCamelCase
    if UpperCamelCase[-1] in end_tokens:
        return UpperCamelCase
    return UpperCamelCase + "."


# Keep the original (pre-obfuscation) name bound for in-file callers.
_add_missing_period = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ (sequence , block_size , pad_token_id ) -> list:
    """Adapt *sequence* (a list of token ids) to exactly *block_size* tokens.

    Longer sequences are truncated (a new list is returned); shorter ones are
    right-padded with *pad_token_id* in place. Distinct parameter names
    restored — the original signature repeated one name, a SyntaxError.
    """
    if len(sequence ) > block_size:
        return sequence[:block_size]
    sequence.extend([pad_token_id] * (block_size - len(sequence )) )
    return sequence
def SCREAMING_SNAKE_CASE_ (sequence , pad_token_id ):
    """Return a 0/1 attention mask for *sequence* (a tensor): 0 at padding positions, 1 elsewhere."""
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    # Zero out the padded positions (this masked assignment had been destroyed
    # by the mangling, leaving the mask all ones).
    mask[idx_pad_tokens] = 0
    return mask
def SCREAMING_SNAKE_CASE_ (story_lines , summary_lines , tokenizer ):
    """Tokenize story and summary lines with *tokenizer* and flatten each into one id list.

    Returns ``(story_token_ids, summary_token_ids)``. Distinct parameter names
    restored — the original signature repeated one name (a SyntaxError) and
    ``tokenizer.encode`` was called with the wrong argument.
    """
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def SCREAMING_SNAKE_CASE_ (batch , separator_token_id ):
    """Build alternating 0/1 token-type ids per sequence in *batch*.

    The sentence counter increments at every *separator_token_id*, and each
    token gets ``sentence_num % 2`` — so consecutive sentences alternate
    segment ids. Tokens before the first separator get ``-1 % 2 == 1``.
    """
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
| 711 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A : Optional[Any] =logging.get_logger(__name__)
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_A : Tuple ={
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_A : List[Any] ={
'''gpt-neox-20b''': 2_048,
}
class _lowercase ( _lowercase ):
    """Fast (tokenizers-backed) GPT-NeoX-20B tokenizer wrapper.

    Restored from the mangled original: the ``__init__`` signature repeated one
    parameter name (a SyntaxError) and the class attributes had collapsed to
    ``a``. NOTE(review): the two ``lowerCamelCase_`` methods were originally
    ``save_vocabulary`` and ``_build_conversation_input_ids`` — their names are
    kept as-is to avoid changing this module's visible interface.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self ,
        vocab_file=None ,
        merges_file=None ,
        tokenizer_file=None ,
        unk_token="<|endoftext|>" ,
        bos_token="<|endoftext|>" ,
        eos_token="<|endoftext|>" ,
        add_prefix_space=False ,
        **kwargs ,
    ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        # If the serialized pre-tokenizer disagrees on add_prefix_space,
        # rebuild it with the requested setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def lowerCamelCase_ ( self , save_directory: str , filename_prefix: Optional[str] = None ):
        # Delegate to the backend model save (writes vocab.json + merges.txt).
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def lowerCamelCase_ ( self , conversation: "Conversation" ):
        """Each utterance is encoded without special tokens and followed by EOS;
        the result is truncated from the left to model_max_length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 631 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
_A : Optional[int] =logging.get_logger(__name__)
_A : Tuple ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_A : Any ={
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
_A : List[str] ={'''allegro/herbert-base-cased''': 514}
_A : Tuple ={}
class _lowercase ( _lowercase ):
    """Fast (tokenizers-backed) HerBERT tokenizer.

    Restored from the mangled original: ``__init__`` and three methods repeated
    a single parameter name (SyntaxErrors), and the class attributes had
    collapsed to ``a``. NOTE(review): the four ``lowerCamelCase_`` methods were
    originally ``build_inputs_with_special_tokens``, ``get_special_tokens_mask``,
    ``create_token_type_ids_from_sequences`` and ``save_vocabulary`` — the names
    are kept as-is to avoid changing this module's visible interface.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self ,
        vocab_file=None ,
        merges_file=None ,
        tokenizer_file=None ,
        cls_token="<s>" ,
        unk_token="<unk>" ,
        pad_token="<pad>" ,
        mask_token="<mask>" ,
        sep_token="</s>" ,
        **kwargs ,
    ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )

    def lowerCamelCase_ ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """Build model inputs: ``<s> A </s>`` or ``<s> A </s> B </s>``."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def lowerCamelCase_ ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        """Return a mask with 1 at special-token positions, 0 at sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def lowerCamelCase_ ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """Token type ids: 0 for ``<s> A </s>``, 1 for ``B </s>`` when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def lowerCamelCase_ ( self , save_directory: str , filename_prefix: Optional[str] = None ):
        # Delegate to the backend model save (writes vocab.json + merges.txt).
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the GIT model package: the mapping below tells
# transformers' _LazyModule which symbols live in which submodule, so heavy
# imports (torch-backed modeling code) only happen on first attribute access.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: expose only the config/processor symbols.
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy (the result had previously been
    # discarded into a throwaway variable, disabling lazy loading entirely).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_A : Tuple ={'''UserAgent''': UserAgent().random}
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> dict:
    """Extract the user-profile dict from an Instagram ``<script>`` tag (*UpperCamelCase*).

    The tag's text is ``window._sharedData = {...};`` — slice from the opening
    brace of the JSON object to just before the trailing semicolon, parse it,
    and drill down to the GraphQL user payload.
    """
    data = UpperCamelCase.contents[0]
    info = json.loads(data[data.find("""{\"config\"""" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


# The scraper class below refers to this helper by its original name.
extract_user_profile = SCREAMING_SNAKE_CASE_
class _lowercase :
    """Scrape the public profile data of an Instagram *username*.

    The profile JSON is fetched once in ``__init__`` and cached on
    ``self.user_data``; the properties below are plain dict lookups into it.
    """

    def __init__( self , username ):
        self.url = F'''https://www.instagram.com/{username}/'''
        # Fetch and cache the profile payload up front.
        self.user_data = self.get_json()

    def get_json( self ):
        """Fetch the profile page and extract the shared-data user dict.

        NOTE(review): ``headers`` is the module-level User-Agent dict defined at
        the top of this file (currently bound to ``_A`` — confirm the binding).
        """
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , """html.parser""" ).find_all("""script""" )
        # The shared-data blob usually sits in the 5th script tag; fall back
        # to the 4th when the page layout differs.
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )

    def __repr__( self ):
        return F'''{self.__class__.__name__}(\'{self.username}\')'''

    def __str__( self ):
        return F'''{self.fullname} ({self.username}) is {self.biography}'''

    @property
    def username( self ):
        return self.user_data["username"]

    @property
    def fullname( self ):
        return self.user_data["full_name"]

    @property
    def biography( self ):
        return self.user_data["biography"]

    @property
    def email( self ):
        return self.user_data["business_email"]

    @property
    def website( self ):
        return self.user_data["external_url"]

    @property
    def number_of_followers( self ):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings( self ):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts( self ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url( self ):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified( self ):
        return self.user_data["is_verified"]

    @property
    def is_private( self ):
        return self.user_data["is_private"]


# The smoke test and the __main__ demo below refer to this class as
# ``InstagramUser``; keep that name bound.
InstagramUser = _lowercase
def SCREAMING_SNAKE_CASE_ (username = "github" ) -> None:
    """Smoke-test the scraper against a known public account.

    Network test — skipped on CI. Parameter restored to ``username`` because
    the body compares against it by that name.
    """
    import os

    if os.environ.get("""CI""" ):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    # ``isinstance`` now checks against ``dict`` (it was mistakenly handed the
    # username string as the type argument).
    assert isinstance(instagram_user.user_data , dict )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bind the instance under the name the prints below actually use (it had
    # been assigned to a throwaway ``_A``, causing NameErrors).
    instagram_user = InstagramUser('''github''')
    print(instagram_user)
    print(F'{instagram_user.number_of_posts = }')
    print(F'{instagram_user.number_of_followers = }')
    print(F'{instagram_user.number_of_followings = }')
    print(F'{instagram_user.email = }')
    print(F'{instagram_user.website = }')
    print(F'{instagram_user.profile_picture_url = }')
    print(F'{instagram_user.is_verified = }')
    print(F'{instagram_user.is_private = }')
| 713 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Shared fixtures for the Marian tokenizer tests.
# NOTE(review): every constant below is bound to the same mangled name `_A`
# (later writes shadow earlier ones); originally these were distinct names.
_A : int =get_tests_dir('''fixtures/test_sentencepiece.model''')
_A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''}
_A : int ='''>>zh<<'''
_A : Dict ='''Helsinki-NLP/'''
# Choose the tensor framework for `return_tensors` from what is installed.
if is_torch_available():
    _A : List[Any] ='''pt'''
elif is_tf_available():
    _A : Optional[int] ='''tf'''
else:
    _A : Dict ='''jax'''
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
    """Test suite for the SentencePiece-based MarianTokenizer."""

    # NOTE(review): the first base class is the mangled self-reference
    # `_lowercase` (presumably TokenizerTesterMixin) and the three class
    # attributes below all share the mangled name `a` — confirm originals.
    a = MarianTokenizer
    a = False
    a = True

    def lowerCamelCase_ ( self: List[str] ):
        # setUp: write a tiny 9-token vocab, tokenizer config and the two
        # SentencePiece model files into tmpdirname, then round-trip the
        # tokenizer through save_pretrained so tests load from disk.
        super().setUp()
        lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
        # Factory: load the fixture tokenizer written by setUp.
        return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
        # Provide (input_text, output_text) for the common tokenizer tests.
        return (
            "This is a test",
            "This is a test",
        )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # "</s>" must map to id 0 and back.
        lowerCamelCase__ : Any = """</s>"""
        lowerCamelCase__ : List[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Vocab ordering and size of the 9-token fixture vocab.
        lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(UpperCamelCase__ ) , 9 )

    def lowerCamelCase_ ( self: int ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )

    def lowerCamelCase_ ( self: int ):
        # Download the real en-de checkpoint, tokenize, then check that
        # save_pretrained emits source.spm and the result reloads.
        lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
        lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
        lowerCamelCase__ : List[str] = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
        self.assertIn("""source.spm""" , UpperCamelCase__ )
        MarianTokenizer.from_pretrained(UpperCamelCase__ )

    def lowerCamelCase_ ( self: Tuple ):
        # Truncation: an absurdly long input must be capped at 512 tokens.
        lowerCamelCase__ : List[Any] = self.get_tokenizer()
        lowerCamelCase__ : Any = tok(
            ["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch.input_ids.shape , (2, 512) )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Padding only: short inputs pad to the batch max, not to 512.
        lowerCamelCase__ : str = self.get_tokenizer()
        lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # Full integration check against a pinned revision of
        # Helsinki-NLP/opus-mt-en-de with a frozen expected encoding.
        # fmt: off
        lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
            58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )

    def lowerCamelCase_ ( self: List[str] ):
        # Checkpoints with separate source/target vocabs: encoding of source
        # vs. target text must use the matching vocab, and decode round-trips.
        lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        lowerCamelCase__ : str = """Tämä on testi"""
        lowerCamelCase__ : Any = """This is a test"""
        lowerCamelCase__ : int = [76, 7, 2_047, 2]
        lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
        lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 | 0 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
# Module-level logger for the masked-BERT configuration module.
_A : Dict =logging.getLogger(__name__)
class _lowercase(PretrainedConfig):
    """Configuration for a Masked BERT model.

    Mirrors the standard BERT configuration and adds movement-pruning
    controls (`pruning_method`, `mask_init`, `mask_scale`).

    Fix: the previous signature reused one mangled name for every parameter
    (duplicate argument names are a SyntaxError), the body read names that
    were never bound, and the class inherited from itself instead of
    PretrainedConfig (imported above). Coherent names are restored here.
    """

    # NOTE(review): `a` is a mangled attribute name (originally the
    # `model_type` marker); kept as-is to preserve the visible interface.
    a = """masked_bert"""

    def __init__(
        self,
        vocab_size=30_522,          # size of the WordPiece vocabulary
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,    # FFN inner dimension
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",      # which pruning scheme to apply
        mask_init="constant",       # initialisation of the pruning scores
        mask_scale=0.0,             # scale used by the score initialiser
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 714 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
# Map of pretrained RWKV checkpoint names to their hosted config.json files.
_A : Optional[int] ={
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase(PretrainedConfig):
    """Configuration for an RWKV model.

    Fix: the previous signature reused one mangled name for every parameter
    (duplicate argument names are a SyntaxError), the body read names that
    were never bound, and the class inherited from itself instead of
    PretrainedConfig (imported above). Coherent names are restored here.
    """

    # NOTE(review): both attributes below carry the mangled name `a`, so the
    # second shadows the first (originally the model-type marker and the
    # attribute map); kept as-is to preserve the visible interface.
    a = """rwkv"""
    a = {"""max_position_embeddings""": """context_length"""}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,        # training context window
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,  # defaults to hidden_size
        intermediate_size=None,      # defaults to 4 * hidden_size
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,             # layer-wise output rescaling period
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
| 631 | 0 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_A : Union[str, Any] =logging.get_logger(__name__)
# fairseq -> HF parameter-name mapping used by the weight loader below;
# '*' in a target name is replaced by the encoder layer index.
_A : List[Any] ={
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
# Parameters that live at the top level of the HF model rather than under a
# nested submodule prefix.
_A : List[str] =[
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
    # Copy one fairseq tensor onto the HF module attribute addressed by the
    # dotted `key`, after asserting that the shapes agree.
    # NOTE(review): all five parameters carry the same mangled name
    # `UpperCamelCase` (invalid as written); the body reads `key`, `value`,
    # `full_name`, `weight_type` and `hf_pointer`, which was presumably the
    # real signature — confirm against the caller.
    # NOTE(review): every branch below assigns the same throwaway local;
    # originally each branch most likely wrote to a different attribute of
    # `hf_pointer` (.weight.data / .weight_g.data / ... ) — verify.
    for attribute in key.split(""".""" ):
        # Walk down the module tree one dotted component at a time.
        lowerCamelCase__ : List[Any] = getattr(UpperCamelCase , UpperCamelCase )
    if weight_type is not None:
        lowerCamelCase__ : Tuple = getattr(UpperCamelCase , UpperCamelCase ).shape
    else:
        lowerCamelCase__ : List[str] = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        lowerCamelCase__ : List[str] = value
    elif weight_type == "weight_g":
        lowerCamelCase__ : List[Any] = value
    elif weight_type == "weight_v":
        lowerCamelCase__ : List[str] = value
    elif weight_type == "bias":
        lowerCamelCase__ : Optional[Any] = value
    else:
        lowerCamelCase__ : Optional[Any] = value
    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
    # Transfer every tensor of the fairseq model into the HF wav2vec2 model:
    # conv feature extractor and adapter weights are delegated to dedicated
    # loaders, everything else goes through the MAPPING table above.
    # NOTE(review): both parameters carry the same mangled name; the body
    # reads `fairseq_model` and `hf_model` — confirm against the caller.
    lowerCamelCase__ : List[str] = []
    lowerCamelCase__ : List[str] = fairseq_model.state_dict()
    lowerCamelCase__ : Tuple = hf_model.feature_extractor
    lowerCamelCase__ : Dict = hf_model.adapter
    for name, value in fairseq_dict.items():
        lowerCamelCase__ : Optional[int] = False
        if "conv_layers" in name:
            # Convolutional feature-extractor weights.
            load_conv_layer(
                UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
            lowerCamelCase__ : List[str] = True
        elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
            # Adapter / projection weights.
            load_adapter(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
            lowerCamelCase__ : str = True
        else:
            # Generic encoder weights via the MAPPING table.
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    lowerCamelCase__ : Dict = True
                    if "*" in mapped_key:
                        # Substitute the encoder layer index into the target name.
                        lowerCamelCase__ : Dict = name.split(UpperCamelCase )[0].split(""".""" )[-2]
                        lowerCamelCase__ : Union[str, Any] = mapped_key.replace("""*""" , UpperCamelCase )
                    if "weight_g" in name:
                        lowerCamelCase__ : List[Any] = """weight_g"""
                    elif "weight_v" in name:
                        lowerCamelCase__ : int = """weight_v"""
                    elif "bias" in name:
                        lowerCamelCase__ : Tuple = """bias"""
                    elif "weight" in name:
                        lowerCamelCase__ : Any = """weight"""
                    else:
                        lowerCamelCase__ : str = None
                    set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
                continue
        if not is_used:
            unused_weights.append(UpperCamelCase )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
    # Load one convolutional feature-extractor tensor. The fairseq name is
    # of the form "conv_layers.<layer_id>.<type_id>.<weight|bias>":
    # type_id 0 -> the conv itself, type_id 2 -> the (group/layer) norm.
    # NOTE(review): parameter names are mangled; the body reads `full_name`,
    # `value`, `feature_extractor`, `unused_weights` and `use_group_norm`.
    # NOTE(review): as in set_recursively, the branch assignments target a
    # throwaway local; originally they wrote to the module's `.data`.
    lowerCamelCase__ : Optional[int] = full_name.split("""conv_layers.""" )[-1]
    lowerCamelCase__ : Optional[Any] = name.split(""".""" )
    lowerCamelCase__ : Dict = int(items[0] )
    lowerCamelCase__ : Dict = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            lowerCamelCase__ : Union[str, Any] = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            lowerCamelCase__ : Optional[Any] = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Norm weights exist for every layer with layer-norm, but only for
        # layer 0 when group-norm is used.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            lowerCamelCase__ : Any = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            lowerCamelCase__ : Tuple = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
    # Load one adapter tensor: the pre-adapter projection + its layer norm
    # (names without "adaptor") or a numbered adapter conv layer.
    # NOTE(review): parameter names are mangled; the body reads `full_name`,
    # `value`, `adapter` and `unused_weights`.
    # NOTE(review): branch assignments target a throwaway local; originally
    # they wrote to the corresponding module's `.data`.
    lowerCamelCase__ : Any = full_name.split("""adaptor.""" )[-1]
    lowerCamelCase__ : Union[str, Any] = name.split(""".""" )
    if items[1].isdigit():
        lowerCamelCase__ : Union[str, Any] = int(items[1] )
    else:
        lowerCamelCase__ : Optional[Any] = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                lowerCamelCase__ : Any = value
                logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                lowerCamelCase__ : str = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                lowerCamelCase__ : List[Any] = value
                logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                lowerCamelCase__ : str = value
                logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(UpperCamelCase , UpperCamelCase ):
        # Numbered adapter conv layer.
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            lowerCamelCase__ : Union[str, Any] = value
            logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            lowerCamelCase__ : List[Any] = value
            logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
    else:
        unused_weights.append(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> nn.Linear:
    """Build a bias-free ``nn.Linear`` head that shares an embedding's weights.

    Fix: the obfuscated body lost the shape unpack and called
    ``nn.Linear(emb, emb, bias=emb)`` with the module itself, which raises
    immediately. Restored to the standard embedding->LM-head conversion.

    :param UpperCamelCase: an ``nn.Embedding`` (weight shape ``(vocab, emb)``).
    :return: an ``nn.Linear`` whose weight tensor is the embedding's weight.
    """
    vocab_size, emb_size = UpperCamelCase.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Rebind the parameter data directly so the head reuses the embedding
    # matrix (the usual (vocab, emb) layout for an LM head).
    lin_layer.weight.data = UpperCamelCase.weight.data
    return lin_layer
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Any:
    # Convert a fairseq wav2vec2 + mBART speech-translation checkpoint into a
    # HF SpeechEncoderDecoderModel, then save model, feature extractor and
    # tokenizer to the dump folder.
    # NOTE(review): all eleven parameters carry the same mangled name
    # `UpperCamelCase` (invalid as written); see the argparse call at the
    # bottom of the file for the intended argument list.
    # NOTE(review): several assignments below bind to throwaway locals where
    # the original presumably wrote config fields (e.g. decoder_start_token_id
    # = 250004) — verify against the upstream conversion script.
    lowerCamelCase__ : Union[str, Any] = WavaVecaConfig.from_pretrained(
        UpperCamelCase , add_adapter=UpperCamelCase , adapter_stride=UpperCamelCase , adapter_kernel_size=UpperCamelCase , use_auth_token=UpperCamelCase , output_hidden_size=UpperCamelCase , )
    lowerCamelCase__ : Optional[Any] = MBartConfig.from_pretrained(UpperCamelCase )
    # load model
    lowerCamelCase__ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            """config_yaml""": config_yaml_path,
            """data""": """/""".join(dict_path.split("""/""" )[:-1] ),
            """w2v_path""": checkpoint_path,
            """load_pretrained_decoder_from""": None,
        } , )
    lowerCamelCase__ : Optional[Any] = model[0].eval()
    # load feature extractor
    lowerCamelCase__ : List[str] = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase , use_auth_token=UpperCamelCase )
    # set weights for wav2vec2 encoder
    lowerCamelCase__ : Any = WavaVecaModel(UpperCamelCase )
    recursively_load_weights_wavaveca(model.encoder , UpperCamelCase )
    # load decoder weights
    lowerCamelCase__ : Optional[Any] = MBartForCausalLM(UpperCamelCase )
    lowerCamelCase__ : int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase )
    logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    lowerCamelCase__ : Union[str, Any] = SpeechEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase )
    lowerCamelCase__ : Tuple = False
    lowerCamelCase__ : int = MBartaaTokenizer(UpperCamelCase )
    tokenizer.save_pretrained(UpperCamelCase )
    # Propagate special-token / architecture fields onto the combined config.
    lowerCamelCase__ : Optional[Any] = hf_wavavec.config.to_dict()
    lowerCamelCase__ : int = tokenizer.pad_token_id
    lowerCamelCase__ : Union[str, Any] = tokenizer.bos_token_id
    lowerCamelCase__ : Optional[int] = tokenizer.eos_token_id
    lowerCamelCase__ : List[str] = """mbart50"""
    lowerCamelCase__ : List[str] = """wav2vec2"""
    lowerCamelCase__ : Optional[int] = tokenizer.eos_token_id
    lowerCamelCase__ : Tuple = 250004
    lowerCamelCase__ : List[Any] = tokenizer.eos_token_id
    lowerCamelCase__ : Optional[Any] = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase )
    hf_wavavec.save_pretrained(UpperCamelCase )
    feature_extractor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    # CLI driver: parse checkpoint/config paths plus adapter hyper-parameters
    # and run the fairseq -> HF conversion defined above.
    _A : Optional[Any] =argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
    parser.add_argument(
        '''--encoder_config_path''',
        default='''facebook/wav2vec2-xls-r-1b''',
        type=str,
        help='''Path to hf encoder wav2vec2 checkpoint config''',
    )
    parser.add_argument(
        '''--decoder_config_path''',
        default='''facebook/mbart-large-50-one-to-many-mmt''',
        type=str,
        help='''Path to hf decoder checkpoint config''',
    )
    parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
    parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
    parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
    parser.add_argument('''--encoder_output_dim''', default=1_024, type=int, help='''encoder output dim''')
    parser.add_argument('''--start_token_id''', default=250_004, type=int, help='''`decoder_start_token_id` of model config''')
    # NOTE(review): the parser is bound to `_A` but used as `parser`/`args`
    # below — identifiers appear mangled; confirm intended names.
    _A : List[Any] =parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
| 715 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : str =logging.get_logger(__name__)
# Map of pretrained RoC-BERT checkpoint names to their hosted config files.
_A : int ={
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase(PretrainedConfig):
    """Configuration for a RoC-BERT model (BERT with pronunciation and
    shape embeddings for robust Chinese understanding).

    Fix: the previous signature reused one mangled name for every parameter
    (duplicate argument names are a SyntaxError), the body read names that
    were never bound, and the class inherited from itself instead of
    PretrainedConfig (imported above). Coherent names are restored here.
    """

    # NOTE(review): `a` is a mangled attribute name (originally the
    # `model_type` marker); kept as-is to preserve the visible interface.
    a = """roc_bert"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,       # use pronunciation embeddings
        enable_shape=True,               # use glyph/shape embeddings
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,               # concatenate extra embeddings with word embeddings
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
| 631 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Helper that builds tiny ViTMAE configs, inputs and model checks for
    the unit tests below."""

    def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ):
        # NOTE(review): every parameter after `self` carries the same mangled
        # name `UpperCamelCase__` (invalid as written); the assignment targets
        # below show the intended names (batch_size, image_size, ...).
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Optional[int] = image_size
        lowerCamelCase__ : Optional[Any] = patch_size
        lowerCamelCase__ : Any = num_channels
        lowerCamelCase__ : Any = is_training
        lowerCamelCase__ : Union[str, Any] = use_labels
        lowerCamelCase__ : List[str] = hidden_size
        lowerCamelCase__ : List[str] = num_hidden_layers
        lowerCamelCase__ : List[Any] = num_attention_heads
        lowerCamelCase__ : str = intermediate_size
        lowerCamelCase__ : str = hidden_act
        lowerCamelCase__ : Any = hidden_dropout_prob
        lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase__ : List[Any] = type_sequence_label_size
        lowerCamelCase__ : int = initializer_range
        lowerCamelCase__ : List[str] = mask_ratio
        lowerCamelCase__ : List[Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        lowerCamelCase__ : str = (image_size // patch_size) ** 2
        lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    def lowerCamelCase_ ( self: Optional[int] ):
        # Produce (config, pixel_values, labels) for one tiny batch.
        lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase__ : Any = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase_ ( self: str ):
        # Build a ViTMAEConfig from the tester's hyper-parameters.
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int ):
        # Forward pass through the bare ViTMAEModel; check hidden-state shape.
        lowerCamelCase__ : Tuple = ViTMAEModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : List[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ):
        # Pre-training head: logits are (batch, num_patches, patch^2 * C);
        # the greyscale path re-checks with num_channels == 1.
        lowerCamelCase__ : int = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        lowerCamelCase__ : Any = (self.image_size // self.patch_size) ** 2
        lowerCamelCase__ : str = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        lowerCamelCase__ : Dict = 1
        lowerCamelCase__ : Optional[int] = ViTMAEForPreTraining(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        lowerCamelCase__ : Tuple = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    def lowerCamelCase_ ( self: Any ):
        # Adapter for the common test mixin: returns (config, inputs_dict).
        # NOTE(review): the original presumably tuple-unpacked
        # config_and_inputs into (config, pixel_values, labels) — mangled here.
        lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
        lowerCamelCase__ : Any = config_and_inputs
        lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model tests for ViTMAE.

    NOTE(review): the class previously inherited ``(_lowercase, _lowercase)``
    (duplicate base — TypeError at class creation), every method was named
    ``lowerCamelCase_`` (only the last survived), and every class attribute was
    named ``a``. Restored to the conventional mixin bases and test names —
    confirm the mixin import names against this file's header.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        """Save/reload round-trip must reproduce outputs (mask fixed by seeding)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                # NOTE(review): previously computed np.abs(out_a - out_a) — always 0,
                # so the assertion could never fail; compare the two runs instead.
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests.

    NOTE(review): the function previously bound the image to a throwaway name
    and returned the undefined ``image``; the call site below uses
    ``prepare_img()``, so the name is restored accordingly.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test against the released facebook/vit-mae-base checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Property name restored from the `self.default_image_processor` call site.
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        # NOTE(review): all locals were previously bound to throwaway names and
        # then referenced by their real names (NameError); bindings restored.
        # Assumes `torch_device` is imported at file level — confirm.
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
# (removed dataset-export residue that was not valid Python)
"""Draw a Sierpinski triangle of a given recursion depth with turtle graphics.

Usage: python fractals.py <int:depth_for_fractal>
"""
import sys


def get_mid(pa, pb) -> tuple[float, float]:
    """Return the midpoint of the segment joining points ``pa`` and ``pb``.

    Previously both terms read from ``pa`` (midpoint of a point with itself);
    fixed to average the two distinct endpoints.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth) -> None:
    """Draw one triangle, then recurse into its three corner sub-triangles.

    Uses the module-global ``my_pen`` created in the __main__ block.
    """
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex3, vertex1), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    # turtle needs a display; import lazily so get_mid/triangle stay importable headless
    import turtle

    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
# (removed dataset-export residue that was not valid Python)
"""Edmonds-Karp (BFS-based Ford-Fulkerson) maximum-flow algorithm."""
from collections import deque


def bfs(graph, s, t, parent):
    """Breadth-first search over the residual graph.

    Returns True if ``t`` is reachable from ``s``; on success ``parent`` holds
    each visited node's predecessor on a shortest augmenting path.
    (The predecessor write was previously lost to a throwaway binding.)
    """
    visited = [False] * len(graph)
    queue = deque([s])  # deque: O(1) popleft vs list.pop(0)'s O(n)
    visited[s] = True
    while queue:
        u = queue.popleft()
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Return the maximum flow from ``source`` to ``sink``.

    ``graph`` is a capacity matrix and is mutated into its residual form.
    """
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Find the bottleneck capacity along the augmenting path
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


if __name__ == "__main__":
    # Previously the matrix and endpoints were bound to `_A` while the call
    # referenced `graph`/`source`/`sink` (NameError); bindings restored, and
    # the demo is guarded so importing the module has no side effects.
    graph = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
    source, sink = 0, 5
    print(ford_fulkerson(graph, source, sink))
# (removed dataset-export residue that was not valid Python)
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowercase :
    """Model tester for TF-ConvBert.

    NOTE(review): the constructor previously declared ~22 parameters all named
    ``UpperCamelCase__`` (a SyntaxError), every result was bound to a throwaway
    name, and all methods shared one name (shadowing). Parameter/method names
    are restored from the call sites in the test class below. The constructor
    accepts configuration arguments but intentionally hardcodes the values, as
    the original did.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        # ConvBert-specific sizes (embedding size, head ratio, conv kernel, groups)
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Build a config plus random ids/masks/labels for the common tests."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        # also exercise the positional list-input calling convention
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # tile each input to (batch, num_choices, seq_len)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _lowercase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model tests for TF-ConvBert.

    NOTE(review): bases were previously ``(_lowercase, _lowercase)`` (duplicate
    base — TypeError); restored to the mixins imported at the top of this file.
    All methods previously shared the name ``lowerCamelCase_``; test names
    restored.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Save as a TF SavedModel and verify hidden states/attentions survive."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                # ConvBert splits heads between self-attention and convolution,
                # hence num_attention_heads / 2
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class _lowercase ( unittest.TestCase ):
    """Slow integration test against the released YituTech/conv-bert-base checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        # NOTE(review): locals were previously bound to throwaway names and then
        # referenced by their real names (NameError); bindings restored.
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
# (removed dataset-export residue that was not valid Python)
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowercase :
def __init__( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Any="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Union[str, Any]=None , ):
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Union[str, Any] = 13
lowerCamelCase__ : Any = 7
lowerCamelCase__ : int = True
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : Dict = True
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : str = 99
lowerCamelCase__ : Dict = 384
lowerCamelCase__ : Optional[Any] = 2
lowerCamelCase__ : Optional[int] = 4
lowerCamelCase__ : Optional[Any] = 37
lowerCamelCase__ : Union[str, Any] = """gelu"""
lowerCamelCase__ : int = 0.1
lowerCamelCase__ : Optional[Any] = 0.1
lowerCamelCase__ : List[Any] = 512
lowerCamelCase__ : Optional[Any] = 16
lowerCamelCase__ : Any = 2
lowerCamelCase__ : Optional[Any] = 0.02
lowerCamelCase__ : int = 3
lowerCamelCase__ : List[str] = 4
lowerCamelCase__ : Any = 128
lowerCamelCase__ : List[Any] = 2
lowerCamelCase__ : Optional[Any] = 9
lowerCamelCase__ : Any = 1
lowerCamelCase__ : Optional[int] = None
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : str = None
if self.use_input_mask:
lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : int = None
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: Any ):
lowerCamelCase__ : List[Any] = TFConvBertModel(config=UpperCamelCase__ )
lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase__ : List[str] = [input_ids, input_mask]
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ):
lowerCamelCase__ : int = TFConvBertForMaskedLM(config=UpperCamelCase__ )
lowerCamelCase__ : Tuple = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase__ : int = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : int = self.num_labels
lowerCamelCase__ : Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__ )
lowerCamelCase__ : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[str] , UpperCamelCase__: Dict ):
lowerCamelCase__ : Optional[int] = self.num_choices
lowerCamelCase__ : Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__ )
lowerCamelCase__ : int = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ : List[str] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ : Tuple = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: int ):
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
lowerCamelCase__ : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__ )
lowerCamelCase__ : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : str = self.prepare_config_and_inputs()
(
lowerCamelCase__
) : str = config_and_inputs
lowerCamelCase__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    # NOTE(review): identifiers in this class are machine-mangled (repeated `a`
    # attributes, every method named `lowerCamelCase_`, undefined
    # `UpperCamelCase__` references) — confirm against the upstream source.
    # All model classes under test (empty when TF is unavailable).
    a = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class map used by the pipeline mixin.
    a = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    a = False
    a = False
    a = False
    # Test setup: model tester + config tester.
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : Dict = TFConvBertModelTester(self )
        lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
    def lowerCamelCase_ ( self: List[str] ):
        self.config_tester.run_common_tests()
    # One thin wrapper per create_and_check_* helper on the model tester.
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
    # Round-trip through tf.saved_model and verify hidden-state/attention shapes.
    @slow
    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : Tuple = True
        if hasattr(UpperCamelCase__ , """use_cache""" ):
            lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
                lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" )
                lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ )
                lowerCamelCase__ : Any = model(UpperCamelCase__ )
                if self.is_encoder_decoder:
                    lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""]
                    lowerCamelCase__ : Any = outputs["""encoder_attentions"""]
                else:
                    lowerCamelCase__ : int = outputs["""hidden_states"""]
                    lowerCamelCase__ : Optional[int] = outputs["""attentions"""]
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                lowerCamelCase__ : Union[str, Any] = getattr(
                    self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                # ConvBERT halves the effective number of attention heads.
                self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def lowerCamelCase_ ( self: int ):
        lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(UpperCamelCase__ )
    # Verify attention outputs both via keyword arguments and via the config.
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ):
            lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
            self.assertEqual(out_len % 2 , 0 )
            lowerCamelCase__ : Any = outputs.decoder_attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(UpperCamelCase__: List[str] ):
            lowerCamelCase__ : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            lowerCamelCase__ : Any = False
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            if self.is_encoder_decoder:
                lowerCamelCase__ : str = model_class(UpperCamelCase__ )
                lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
                check_decoder_attentions_output(UpperCamelCase__ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ : Optional[int] = True
            lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            # Check attention is always last and order is fine
            lowerCamelCase__ : List[Any] = True
            lowerCamelCase__ : int = True
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
            self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
@require_tf
class _lowercase ( unittest.TestCase ):
    # Integration test: run the public YituTech/conv-bert-base checkpoint on a
    # fixed 6-token input and compare a 3x3 slice of the output against
    # precomputed reference values.
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        lowerCamelCase__ : Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        lowerCamelCase__ : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )[0]
        # expected shape: (batch=1, seq_len=6, hidden=768)
        lowerCamelCase__ : Dict = [1, 6, 768]
        self.assertEqual(output.shape , UpperCamelCase__ )
        lowerCamelCase__ : Dict = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 )
| 718 |
'''simple docstring'''
# Package re-export surface; the constant below is the package version string.
_A : List[str] ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)
# `rich` helpers are optional; only exported when the extra is installed.
if is_rich_available():
    from .utils import rich
| 631 | 0 |
'''simple docstring'''
from math import factorial, pi
def SCREAMING_SNAKE_CASE_(theta, accuracy=30) -> float:
    """Approximate sin(theta) with the first `accuracy` terms of its Maclaurin series.

    :param theta: angle in radians (int or float)
    :param accuracy: number of series terms to sum (positive int)
    :raises ValueError: if theta is not numeric or accuracy is not a positive int
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_sin() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_sin() requires a positive int for accuracy""")
    theta = float(theta)
    # range-reduce theta into [0, 2*pi) so the series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def SCREAMING_SNAKE_CASE_(theta, accuracy=30) -> float:
    """Approximate cos(theta) with the first `accuracy` terms of its Maclaurin series.

    :param theta: angle in radians (int or float)
    :param accuracy: number of series terms to sum (positive int)
    :raises ValueError: if theta is not numeric or accuracy is not a positive int
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_cos() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_cos() requires a positive int for accuracy""")
    theta = float(theta)
    # range-reduce theta into [0, 2*pi) so the series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
_A : Any =logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL.
_A : Dict ={
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowercase ( _lowercase ):
    """Configuration for the TrOCR text decoder.

    Stores decoder hyperparameters and forwards the special-token ids to the
    base config class.
    """

    # NOTE(review): the three `a` attributes below are machine-mangled; upstream
    # they are model_type / keys_to_ignore_at_inference / attribute_map. Kept
    # as-is because external references cannot be confirmed from this file.
    a = """trocr"""
    a = ["""past_key_values"""]
    a = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        # special-token ids are handled by the base class
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 631 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowercase ( _lowercase ):
    # Config-tester specialization: checks the MobileViTV2-specific attribute
    # `width_multiplier` exists on an instantiated config.
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) )
class _lowercase :
    """Helper that builds small MobileViTV2 configs and inputs for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # final-stage hidden size scales with the width multiplier
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: List[Any] ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : str = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ : Union[str, Any] = config_and_inputs
lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    # NOTE(review): identifiers in this class are machine-mangled (repeated `a`
    # attributes, every method named `lowerCamelCase_`, undefined
    # `UpperCamelCase__` references) — confirm against the upstream source.
    # All model classes under test (empty when torch is unavailable).
    a = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class map used by the pipeline mixin.
    a = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    a = False
    a = False
    a = False
    a = False
    # Test setup: model tester + config tester.
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
        lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
    def lowerCamelCase_ ( self: Tuple ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass
    # forward() must accept `pixel_values` as its first argument.
    def lowerCamelCase_ ( self: Tuple ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCamelCase__ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    # Hidden states: 5 stages, spatial size halved at each stage.
    def lowerCamelCase_ ( self: List[str] ):
        def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs.hidden_states
            lowerCamelCase__ : List[Any] = 5
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCamelCase__ : int = 2
            for i in range(len(UpperCamelCase__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : str = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_():
    """Load the local COCO test image used by the integration tests below."""
    # original assigned to a throwaway name and returned the undefined `image`
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    # Integration tests against public MobileViTV2 checkpoints; logits are
    # compared to precomputed reference slices with atol=1e-4.
    @cached_property
    def lowerCamelCase_ ( self: Tuple ):
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
            if is_vision_available()
            else None
        )
    @slow
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.default_image_processor
        lowerCamelCase__ : List[Any] = prepare_img()
        lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : int = model(**UpperCamelCase__ )
        # verify the logits
        lowerCamelCase__ : str = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Union[str, Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
        lowerCamelCase__ : str = outputs.logits
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Any = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ] , device=UpperCamelCase__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
    # Post-processing: segmentation maps both resized to target_sizes and raw.
    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
        lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
        lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
        lowerCamelCase__ : int = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        lowerCamelCase__ : int = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 720 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def SCREAMING_SNAKE_CASE_(graph) -> bool:
    """Return True if `graph` (adjacency-list dict/list of vertex -> neighbors,
    vertices numbered 0..n-1) is bipartite, i.e. 2-colorable.

    Colors each connected component with DFS, then verifies that no edge joins
    two vertices of the same color.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # color vertex v with c and propagate the opposite color to neighbors
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # a same-colored edge means an odd cycle -> not bipartite
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
_A : int ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
# original called the undefined names `check_bipartite_dfs` and `graph`
print(SCREAMING_SNAKE_CASE_(_A))
| 631 | 0 |
'''simple docstring'''
import requests
# API key for OpenWeatherMap; empty by default.
_A : Dict ='''''' # <-- Put your OpenWeatherMap appid here!
# Base URL shared by every endpoint below.
_A : str ='''https://api.openweathermap.org/data/2.5/'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = "Chicago" , UpperCamelCase = APPID ) -> dict:
return requests.get(URL_BASE + """weather""" , params=locals() ).json()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = "Kolkata, India" , UpperCamelCase = APPID ) -> dict:
return requests.get(URL_BASE + """forecast""" , params=locals() ).json()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 55.68 , UpperCamelCase = 12.57 , UpperCamelCase = APPID ) -> dict:
return requests.get(URL_BASE + """onecall""" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
_A : Any =input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
| 721 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase ( _lowercase ):
    """Map-style dataset that lazily applies a preprocessing function to each item.

    Fixed: ``__init__`` previously bound its arguments to throwaway locals, so
    the ``self.dataset``/``self.process``/``self.params`` attributes read by
    ``__len__``/``__getitem__`` were never set, and ``__getitem__`` indexed
    with an undefined name ``i``.
    """

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class _lowercase ( _lowercase ):
    """Iterator that applies ``infer`` to each item produced by ``loader``,
    transparently unrolling batched model outputs back into single items.

    Fixed: ``__init__``/``__iter__`` bound everything to throwaway locals (the
    attributes read later were never set), and ``loader_batch_item`` and
    ``__next__`` were both defined under one name, so the second definition
    clobbered the first and ``self.loader_batch_item()`` raised.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping for unrolling a batched output.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the current element of ``_loader_batch_data`` made to look
        like a batch of size 1, and advance the internal index."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice.
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict).
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first.
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around.
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers.
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element with the original class so it looks like batch_size=1.
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch: return the next item in it.
            return self.loader_batch_item()

        # We're out of items within a batch.
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch.
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # Could be the last batch, so we can't unroll as many elements.
                self.loader_batch_size = observed_batch_size
            # Set the internal index to unwrap the batch.
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches.
            return processed
class _lowercase ( _lowercase ):
    """Iterator that flattens generator outputs of ``infer``: each item from
    ``loader`` yields a sub-iterator, whose elements are emitted one by one.

    Note: ``loader_batch_size`` is deliberately not forwarded to the parent —
    the original likewise passed only three arguments to ``super().__init__``.
    Fixed: ``__iter__``/``__next__`` previously bound their results to
    throwaway locals, leaving ``self.iterator``/``self.subiterator`` unset.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # First call: open a sub-iterator for the first preprocessed item.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return the next item of the current sub-iterator.
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators.
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class _lowercase ( _lowercase ):
    """Iterator that regroups flattened chunk items back into lists.

    Extremely similar to the parent iterator in its unpacking mechanism, BUT
    each item must carry an ``is_last`` flag: because everything is flattened
    by the chunk iterator, ``is_last`` marks the original ``process``
    boundaries so ``process`` and ``postprocess`` see the same data.  Items
    are accumulated (possibly while unbatching) until ``is_last`` is hit,
    then the whole accumulator is returned.

    Fixed: ``__iter__``/``__next__`` previously bound their results to
    throwaway locals, so ``self.iterator`` and all batch bookkeeping were
    never updated.
    """

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        # Finish unrolling any batch we are already in the middle of.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                # Try to infer the size of the batch.
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # Could be the last batch, so we can't unroll as many elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class _lowercase ( _lowercase ):
    """Dataset view that exposes a single key of each underlying dict item.

    Fixed: ``__init__`` bound its arguments to throwaway locals (so
    ``self.dataset``/``self.key`` were never set) and ``__getitem__`` indexed
    with an undefined name ``i``.
    """

    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class _lowercase ( _lowercase ):
    """Dataset view pairing two keys of each dict item as text / text_pair.

    Fixed: ``__init__`` declared three parameters under the same name (a
    SyntaxError), never set the instance attributes, and ``__getitem__``
    indexed with an undefined name ``i``.
    """

    def __init__(self, dataset, keya, keyb):
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 631 | 0 |
"""Baconian cipher: each letter maps to a 5-character code over the alphabet
{A, B}; spaces map to a single space.

Fixed: the two lookup tables were assigned to one name (``_A``, the second
clobbering the first) while the functions referenced ``encode_dict`` /
``decode_dict``, and both functions shared one name so the encoder was
shadowed by the decoder.
"""
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
# Inverse table; every code above is unique, so this is a true inverse.
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode *word* (letters and spaces, case-insensitive) to Baconian."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian string (groups of 5 'A'/'B' chars, space-separated words)."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        # Consume the word five characters (one letter) at a time.
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 700 |
"""Dump basic information about the current environment to stdout.

Prints Python/OS details unconditionally, plus torch and transformers
versions when those optional packages are importable (``None`` otherwise).
"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# NOTE(review): assigned but never used, and ``os`` is imported yet unused —
# this looks like it was meant to set an environment variable (e.g.
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"); confirm intent.
_A : Dict ='''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
# torch is optional: report its CUDA/cuDNN details only when installed.
try:
    import torch
    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)
# transformers is optional as well.
try:
    import transformers
    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 631 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name) -> SwinConfig:
    """Build a ``SwinConfig`` matching the timm checkpoint named *swin_name*.

    The timm name encodes the variant, e.g. ``swin_tiny_patch4_window7_224``:
    index 1 is the model size, index 3 the window ("window7"), index 4 the
    input image size.  Fixed: the label mapping used ``int()`` on the model
    name instead of the label key, and none of the computed values were ever
    written onto the config; restored the name used by the caller.
    """
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    # NOTE(review): takes only the last character, so a two-digit window
    # (e.g. "window12") would be mis-parsed — confirm against supported names.
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    # ImageNet-1k label names, fetched from the Hub.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}  # fixed: was int(swin_name)
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name) -> str:
    """Map a timm Swin parameter name to its HF ``SwinForImageClassification`` name.

    Classifier-head parameters keep their top-level name; everything else is
    prefixed with ``swin.`` (the backbone submodule).  Fixed: each rename was
    previously bound to a throwaway local instead of updating ``name``.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite a timm Swin state dict in-place into HF naming.

    Fused ``qkv`` weights/biases are split into separate query/key/value
    entries sized from the HF model; relative-position masks are dropped.
    Fixed: the original declared the same parameter name twice (a SyntaxError)
    and popped every value into a throwaway local, returning an emptied dict;
    restored the name used by the caller.
    """

    def _rename(name):
        # timm -> HF parameter-name mapping (kept local so this function is
        # usable on its own).
        if "patch_embed.proj" in name:
            name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
        if "patch_embed.norm" in name:
            name = name.replace("patch_embed.norm", "embeddings.norm")
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "norm.weight":
            name = "layernorm.weight"
        if name == "norm.bias":
            name = "layernorm.bias"
        if "head" in name:
            name = name.replace("head", "classifier")
        else:
            name = "swin." + name
        return name

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            # Relative-position masks are not parameters of the HF model.
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[_rename(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Convert the timm Swin checkpoint *swin_name* to HF format, verify its
    logits against the timm reference on a test image, and save the result.

    Fixed: the original declared the same parameter name twice (a SyntaxError)
    and discarded every intermediate result in one throwaway local; restored
    the name used by the ``__main__`` block.
    """
    # Reference model with pretrained weights.
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    # Port the weights (renames keys, splits fused qkv).
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    # Sanity-check on a real image: both models must agree to 1e-3.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Fixed: the parser and parsed args were assigned to ``_A`` while the code
    # below referenced ``parser``/``args`` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import structure for the TrOCR sub-package.  Fixed: the structure was
# assigned to ``_A`` while ``_LazyModule`` below was handed the undefined name
# ``_import_structure``, and the torch-only modeling symbols were never merged
# into it; the constructed module is installed into ``sys.modules``.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code needs torch; only advertised when it is installed.
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)

# Maps old (suno/bark) layer-name fragments to their HF equivalents; applied
# with str.replace while fixing up checkpoint keys.  Fixed: all of these
# module constants were assigned to one name (``_A``), each clobbering the
# previous, while the functions below reference them by the names used here.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub location of each checkpoint variant.
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Local cache path of the checkpoint for *model_type* ("text"/"coarse"/"fine").

    Fixed: the lookup key was bound to a throwaway local while the code below
    read the undefined name ``key``, and the path was joined onto the model
    type instead of the cache directory; restored the name used by the caller.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    """Fetch *file_name* from Hub repo *from_hf_path* into the local cache dir.

    Fixed: the original declared the same parameter name twice (a SyntaxError)
    and created the directory named after its argument instead of ``CACHE_DIR``;
    restored the name used by the caller.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load a suno/bark checkpoint and return the converted HF sub-model.

    Fixed: the original declared four parameters under one name (a
    SyntaxError) and bound the model/config classes and every intermediate to
    one throwaway local, so names such as ``ModelClass``/``state_dict`` used
    below were undefined.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)

    # this is a hack: older checkpoints carry a single vocab_size.
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model.generation_config = GenerationConfigClass()

    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile prefix and rename layers.
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")

    # strict=False because the .attn.bias buffers are intentionally skipped.
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1E6 , 1 )}M params, {round(val_loss , 3 )} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one bark sub-model to HF format, compare it with the original
    suno implementation on random inputs, and save it.

    Fixed: the original declared three parameters under one name (a
    SyntaxError) and bound every intermediate to one throwaway local; restored
    the name used by the ``__main__`` block.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """Assemble the full Bark model from already-converted sub-models and the
    Encodec codec, then save (and push) it.

    Fixed: the original declared six parameters under one name (a SyntaxError)
    and bound every sub-config/sub-model to one throwaway local, so the names
    used to build ``BarkConfig``/``BarkModel`` were undefined.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    # Fixed: the parser and parsed args were assigned to ``_A`` while the code
    # below referenced ``parser``/``args`` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    args = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 702 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Union[str, Any] =logging.get_logger(__name__)  # module logger
# NOTE(review): the mapping below immediately rebinds ``_A``, clobbering the
# logger bound on the previous line — the two were presumably meant to have
# distinct names (a logger and a pretrained-config archive map); confirm.
_A : List[str] ={
    '''MIT/ast-finetuned-audioset-10-10-0.4593''': (
        '''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
    ),
}
class _lowercase ( _lowercase ):
    """Configuration for the Audio Spectrogram Transformer (AST) model.

    Fixed: the original ``__init__`` declared every parameter under the same
    name (a SyntaxError) and bound the values to throwaway locals instead of
    instance attributes; parameter names and defaults are restored from the
    attribute names the body itself referenced, in order.
    """

    a = """audio-spectrogram-transformer"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 631 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_A : int =logging.get_logger(__name__)
_A : Tuple ={
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class _lowercase ( _lowercase ):
    """Configuration for the DPT (Dense Prediction Transformer) model.

    Parameter names are restored from the attribute assignments in the body;
    the obfuscated signature reused one name for every argument (duplicate
    argument SyntaxError). Mutable list defaults are kept as in the original
    upstream signature for interface compatibility.
    """

    a = """dpt"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1_024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            # Hybrid mode uses a BiT convolutional backbone in front of the ViT.
            if backbone_config is None:
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                backbone_config = {
                    """global_padding""": """same""",
                    """layer_type""": """bottleneck""",
                    """depths""": [3, 4, 9],
                    """out_features""": ["""stage1""", """stage2""", """stage3"""],
                    """embedding_dynamic_padding""": True,
                }
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info("""Initializing the config with a `BiT` backbone.""" )
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def lowerCamelCase_ ( self: Tuple ):
        """Serialize this config (including any nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 703 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# The functions below reference these constants by their real names; the
# obfuscated `_A` bindings left them undefined, so the names are restored.
PATH_TO_EXAMPLES = "examples/"
# pattern name -> (compiled regex matching the versioned line, replacement with VERSION placeholder)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# pattern name -> file that carries the hard-coded version
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version hard-coded in `fname` using the regex for `pattern`.

    `pattern` is a key of REPLACE_PATTERNS; its replacement template's VERSION
    placeholder is substituted with `version` before applying the regex.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Pin `version` in the `check_min_version(...)` call of every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""" )
        if "legacy" in directories:
            directories.remove("""legacy""" )
        for fname in fnames:
            if fname.endswith(""".py""" ):
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""" )
def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded in the repo.

    Patch releases skip the examples (their pinned minimum version is unchanged).
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace `main` doc links with stable doc links in the README model list."""
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1.""" ):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read and parse the current version from the main __init__."""
    with open(REPLACE_FILES["""init"""], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Compute the next release version, confirm it with the user, and apply it."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = f'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(f'''Which version are you releasing? [{default_version}]''' )
    if len(version) == 0:
        version = default_version
    print(f'''Updating version to {version}.''' )
    global_version_update(version, patch=patch)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
        clean_main_ref_in_model_list()
def post_release_work():
    """Bump to the next dev version after a release, confirming with the user."""
    current_version = get_version()
    dev_version = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'''Which version are we developing now? [{dev_version}]''' )
    if len(version) == 0:
        version = dev_version
    print(f'''Updating version to {version}.''' )
    global_version_update(version)
    print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Restore `parser`/`args` bindings: the obfuscated `_A` assignments left
    # both names undefined where they are used below.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 631 | 0 |
'''simple docstring'''
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Restored name: the search functions below read `precision`.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan of array[left:right]; return the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted list; return an index of target or -1.

    Falls back to `lin_search` once the active range is narrower than the
    module-level `precision`.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over array[left..right]; return an index of target or -1.

    Falls back to `lin_search` once the active range is narrower than the
    module-level `precision`.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Restore variable bindings: the obfuscated `_A` assignments left
    # `collection`/`target`/result names undefined where used below.
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f'Iterative search: {target} found at positions: {result1}')
        print(f'Recursive search: {target} found at positions: {result2}')
    else:
        print('''Not found''')
| 704 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
# Module-level flag; nothing below reads it. Presumably a determinism/TF32
# toggle in the original diffusers test file — TODO confirm upstream.
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
    """Checks that one training pass behaves identically under DDPM and DDIM schedulers.

    Method names restored: both methods were obfuscated to the same name (the
    second silently shadowed the first) and the test calls
    `self.get_model_optimizer`, which did not exist.
    """

    def get_model_optimizer(self, resolution=32):
        """Return a freshly seeded small UNet and an SGD optimizer over its parameters."""
        set_seed(0 )
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0_001 )
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_000 , (4,) ).long().to(device ) for _ in range(4 )]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # The last batch's noised inputs and predictions must match across schedulers.
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1e-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1e-5 ) )
| 631 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable

# Restored `_import_structure`: the obfuscated `_A` bindings left the name
# referenced by `_LazyModule` undefined and dropped the conditional entries.
_import_structure = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_dpt'''] = ['''DPTFeatureExtractor''']
    _import_structure['''image_processing_dpt'''] = ['''DPTImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_dpt'''] = [
        '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''DPTForDepthEstimation''',
        '''DPTForSemanticSegmentation''',
        '''DPTModel''',
        '''DPTPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 705 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    """HRRN scheduling: turn-around time of each process.

    process_name/arrival_time/burst_time are parallel lists of length
    no_of_process; returns the list of turn-around times in arrival order.
    NOTE: `arrival_time` is sorted in place (original behavior preserved).
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # First unfinished process (processes are sorted by arrival).
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    """Waiting time of each process: turn-around time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Restore variable bindings: the obfuscated `_A` assignments left the
    # names used in the printing loop undefined.
    no_of_process = 5
    process_name = ['''A''', '''B''', '''C''', '''D''', '''E''']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
    for i in range(0, no_of_process):
        print(
            f'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
            f'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
        )
    print(f'average waiting time : {mean(waiting_time):.5f}')
    print(f'average turn around time : {mean(turn_around_time):.5f}')
| 631 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
# This conversion script only runs inference; disable autograd globally.
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """Build (old_key, new_key) pairs mapping MSN checkpoint names to HF ViT names.

    config: provides `num_hidden_layers`.
    base_model: if True, strip the leading "vit." prefix (bare ViTMSNModel) and
        map the final layernorm instead of a classification head.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append(
            (f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("""module.cls_token""", """vit.embeddings.cls_token"""),
            ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
            ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
            ("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("""module.norm.weight""", """layernorm.weight"""),
                ("""module.norm.bias""", """layernorm.bias"""),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("""norm.weight""", """vit.layernorm.weight"""),
                ("""norm.bias""", """vit.layernorm.bias"""),
                ("""head.weight""", """classifier.weight"""),
                ("""head.bias""", """classifier.bias"""),
            ] )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection in `state_dict` into query/key/value tensors.

    Mutates `state_dict` in place: pops the timm-style fused matrices and writes
    the HF-style per-projection keys (prefixed with "vit." unless base_model).
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the classification head weights from the checkpoint, in place."""
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    """Drop the MSN self-supervised projection head from the checkpoint, in place."""
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        """module.fc.fc1.weight""",
        """module.fc.fc1.bias""",
        """module.fc.bn1.weight""",
        """module.fc.bn1.bias""",
        """module.fc.bn1.running_mean""",
        """module.fc.bn1.running_var""",
        """module.fc.bn1.num_batches_tracked""",
        """module.fc.fc2.weight""",
        """module.fc.fc2.bias""",
        """module.fc.bn2.weight""",
        """module.fc.bn2.bias""",
        """module.fc.bn2.running_mean""",
        """module.fc.bn2.running_var""",
        """module.fc.bn2.num_batches_tracked""",
        """module.fc.fc3.weight""",
        """module.fc.fc3.bias""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the entry `dct[old]` to `dct[new]`, in place."""
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Convert a ViT-MSN checkpoint at `checkpoint_url` to HF format and save it.

    Downloads the checkpoint and the ImageNet label map, remaps the state dict,
    verifies a logits slice against known values, then writes the model and
    image processor to `pytorch_dump_folder_path`. Requires network access.
    """
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = """datasets/huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename ) , """r""" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # Architecture variants are encoded in the checkpoint URL.
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""" )["""target_encoder"""]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config, base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url, stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="""pt""" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1E-4 )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Restore `parser`/`args` bindings (obfuscated `_A` left them undefined)
    # and strip dataset-dump residue fused onto the final line.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
        type=str,
        help='''URL of the checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
'''simple docstring'''
# Conditionally expose the VQ-Diffusion pipeline: these classes require both
# PyTorch and `transformers` to be installed.
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowercase :
    """Builds tiny MaskFormerSwin configs/inputs and shape checks for the tests.

    Parameter and method names are restored from the attribute assignments and
    call sites: the obfuscated signature reused one name for every argument
    (duplicate argument SyntaxError) and all five methods shared one name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Random pixel values (and optional labels) plus a matching config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build the tiny MaskFormerSwinConfig under test."""
        # NOTE(review): `path_norm=` (not `patch_norm=`) mirrors the original
        # upstream call — TODO confirm whether the config accepts this kwarg.
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the bare model and check the last-hidden-state shape."""
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Run the backbone and check feature maps, channels, and bad-stage errors."""
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["""stem"""]
            model = MaskFormerSwinBackbone(config=config )

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test mixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common-mixin test suite for MaskFormerSwinModel / MaskFormerSwinBackbone.

    NOTE(review): identifiers in this class look machine-mangled — every class
    attribute is named `a` (each assignment shadows the previous one), every
    method is named `lowerCamelCase_` (later defs shadow earlier ones, and
    unittest will not collect any of them as tests), several assignments bind a
    throwaway `lowerCamelCase__` while later lines read other names, and two
    signatures repeat the parameter name `UpperCamelCase__`, which is a
    SyntaxError. Code is kept byte-for-byte here; confirm the intended names
    against the upstream MaskFormerSwin test file before fixing.
    """

    # model classes exercised by the common tests
    a = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    # pipeline mapping for the pipeline mixin
    a = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
    # feature flags for the common mixin (all disabled)
    a = False
    a = False
    a = False
    a = False
    a = False

    def lowerCamelCase_ ( self: List[Any] ):
        # NOTE(review): results are bound to a throwaway name — presumably meant
        # to set `self.model_tester` / `self.config_tester`, and `config_class`
        # was presumably `MaskFormerSwinConfig` (undefined here) — confirm.
        lowerCamelCase__ : str = MaskFormerSwinModelTester(self )
        lowerCamelCase__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 )

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
            """ `nn.DataParallel`"""
        ) )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # standard config round-trip checks from ConfigTester
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase_ ( self: Tuple ):
        # intentionally a no-op: config common properties are covered above
        return

    def lowerCamelCase_ ( self: Optional[Any] ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*UpperCamelCase__ )

    @unittest.skip("""Swin does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass

    @unittest.skip("""Swin does not support feedforward chunking""" )
    def lowerCamelCase_ ( self: Optional[Any] ):
        pass

    def lowerCamelCase_ ( self: Optional[Any] ):
        # checks get_input_embeddings / get_output_embeddings for every model class
        lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase__ : str = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )

    def lowerCamelCase_ ( self: int ):
        # checks the forward signature starts with `pixel_values`
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : str = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Dict = [*signature.parameters.keys()]
            lowerCamelCase__ : List[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def lowerCamelCase_ ( self: Any ):
        pass

    @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def lowerCamelCase_ ( self: Optional[Any] ):
        pass

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ):
        # NOTE(review): the four parameters share one name — SyntaxError as
        # written; presumably (inputs_dict, config, model_class, image_size) as
        # in the upstream `check_hidden_states_output` helper — confirm.
        lowerCamelCase__ : List[str] = model_class(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        with torch.no_grad():
            lowerCamelCase__ : List[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
        lowerCamelCase__ : Union[str, Any] = outputs.hidden_states
        lowerCamelCase__ : Union[str, Any] = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
        # Swin has a different seq_length
        lowerCamelCase__ : int = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCamelCase__ : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        # hidden-states output, both via argument and via config flag
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Optional[int] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Any = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : Any = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Optional[Any] ):
        # same check with an input size that is not a multiple of the patch size,
        # so the model has to pad; expected size is rounded up per dimension
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Tuple = 3
        lowerCamelCase__ : Optional[int] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCamelCase__ : Optional[int] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCamelCase__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCamelCase__ : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Dict = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : Dict = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )

    @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def lowerCamelCase_ ( self: Any ):
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def lowerCamelCase_ ( self: Any ):
        pass

    def lowerCamelCase_ ( self: int ):
        # verifies tuple output (return_dict=False) matches dict output
        lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(UpperCamelCase__: int ):
            # zero out NaNs so the allclose comparisons below stay meaningful
            lowerCamelCase__ : str = 0
            return t

        def check_equivalence(UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str]={} ):
            # NOTE(review): repeated parameter name (SyntaxError as written) and
            # a mutable `{}` default; presumably (model, tuple_inputs,
            # dict_inputs, additional_kwargs) — confirm.
            with torch.no_grad():
                lowerCamelCase__ : List[Any] = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ )
                lowerCamelCase__ : Any = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple()

            def recursive_check(UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[Any] ):
                if isinstance(UpperCamelCase__ , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ):
                        recursive_check(UpperCamelCase__ , UpperCamelCase__ )
                elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(UpperCamelCase__ , UpperCamelCase__ )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1e-5 ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
                            F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. Dict has'''
                            F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.'''
                        ) , )

            recursive_check(UpperCamelCase__ , UpperCamelCase__ )

        for model_class in self.all_model_classes:
            lowerCamelCase__ : Tuple = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            lowerCamelCase__ : Optional[int] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : str = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            lowerCamelCase__ : Optional[int] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : str = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
            lowerCamelCase__ : List[str] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
@require_torch
class _lowercase ( unittest.TestCase , _lowercase ):
    """Backbone-specific checks for MaskFormerSwinBackbone (feature maps,
    hidden states and attentions)."""

    # Both class attributes were named `a`, so the second shadowed the first.
    # The loop below reads `self.all_model_classes`; backbone test mixins
    # conventionally read `config_class` (NOTE(review): confirm against mixin).
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp( self ):
        # Renamed from the mangled `lowerCamelCase_` so unittest actually runs
        # it before each test; it installs the tester the test below reads.
        self.model_tester = MaskFormerSwinModelTester(self )

    def test_backbone_outputs( self ):
        """Run each backbone class once and validate every output field.

        Renamed from `lowerCamelCase_` (unittest only collects `test_*` names);
        locals restored from the names the original body already read.
        """
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            # (the obfuscated isinstance argument was undefined; presumably `tuple`)
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 707 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Builds a tiny ViTMAE config plus dummy inputs and hosts the per-model checks.

    The original `__init__` repeated one parameter name for every argument
    (a SyntaxError) and bound every attribute to a throwaway local; parameter
    names below follow the attribute assignments, and method names are restored
    from the call sites in the sibling test class (`prepare_config_and_inputs`,
    `get_config`, `create_and_check_model`, `create_and_check_for_pretraining`,
    `prepare_config_and_inputs_for_common`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # accepted but (as in the original body) never stored
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    def prepare_config_and_inputs(self):
        """Return the (config, pixel_values, labels) triple used by the checks."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Return a small ViTMAEConfig mirroring this tester's attributes."""
        return ViTMAEConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            # presumably False, as in the upstream ViTMAE tester — TODO confirm
            is_decoder=False ,
            initializer_range=self.initializer_range ,
            mask_ratio=self.mask_ratio ,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the encoder's last-hidden-state shape (masked sequence length)."""
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        """Check the pretraining head's logits shape, including the greyscale case."""
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as (config, inputs_dict) for the common tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common-mixin test suite for ViTMAEModel / ViTMAEForPreTraining.

    NOTE(review): identifiers look machine-mangled — duplicate `a` class
    attributes (later ones shadow earlier ones), every method named
    `lowerCamelCase_`, assignments bound to a throwaway `lowerCamelCase__`
    while later lines read other names, one signature repeating
    `UpperCamelCase__` (a SyntaxError), and tuple targets carrying annotations
    (`a , b : str = ...`, also a SyntaxError). Code kept byte-for-byte;
    confirm intended names against the upstream ViTMAE test file.
    """

    a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    a = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
    a = False
    a = False
    a = False
    a = False

    def lowerCamelCase_ ( self: Any ):
        # NOTE(review): presumably sets `self.model_tester` / `self.config_tester`;
        # `config_class` / `has_text_modality` args are undefined here — confirm.
        lowerCamelCase__ : Tuple = ViTMAEModelTester(self )
        lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: Dict ):
        pass

    def lowerCamelCase_ ( self: List[str] ):
        # checks get_input_embeddings / get_output_embeddings for each model class
        lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : str = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase__ : Optional[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )

    def lowerCamelCase_ ( self: List[Any] ):
        # checks the forward signature starts with `pixel_values`
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Any = model_class(UpperCamelCase__ )
            lowerCamelCase__ : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Any = [*signature.parameters.keys()]
            lowerCamelCase__ : List[str] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    def lowerCamelCase_ ( self: Union[str, Any] ):
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] ):
        # NOTE(review): the three parameters share one name — SyntaxError as
        # written; presumably the (tf_model, pt_model, inputs_dict) triple of the
        # common `check_pt_tf_models` hook — confirm.
        # make masks reproducible
        np.random.seed(2 )
        lowerCamelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        lowerCamelCase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        lowerCamelCase__ : Tuple = torch.from_numpy(UpperCamelCase__ )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        lowerCamelCase__ : Tuple = pt_noise
        super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase_ ( self: str ):
        # save/load round-trip with the random mask seeded identically both times;
        # NaNs are zeroed before comparing outputs
        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs[0].cpu().numpy()
            lowerCamelCase__ : List[str] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ )
                lowerCamelCase__ : List[str] = model_class.from_pretrained(UpperCamelCase__ )
                model.to(UpperCamelCase__ )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                # Make sure we don't have nans
                lowerCamelCase__ : Dict = after_outputs[0].cpu().numpy()
                lowerCamelCase__ : Tuple = 0
                lowerCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(UpperCamelCase__ , 1e-5 )

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: int ):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: Any ):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass

    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Optional[int] ):
        pass

    @slow
    def lowerCamelCase_ ( self: List[str] ):
        # smoke-loads the first pretrained checkpoint from the hub
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> "Image.Image":
    """Load the standard COCO fixture image used by the integration tests.

    Return annotation corrected from `List[Any]` — the function returns a PIL
    image (kept as a string annotation since PIL is imported conditionally).
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


# The integration test below calls `prepare_img()`, which was undefined under
# the mangled name above; expose it as an alias so that call resolves.
prepare_img = SCREAMING_SNAKE_CASE_
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test for the pretrained facebook/vit-mae-base checkpoint."""

    @cached_property
    def default_image_processor( self ):
        # Renamed from the mangled `lowerCamelCase_`: the test below reads
        # `self.default_image_processor`.
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None

    @slow
    def lowerCamelCase_ ( self: Tuple ):
        # NOTE(review): presumably meant to be a `test_*`-named method so
        # unittest collects it — confirm the intended name upstream.
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1e-4 ) )
| 631 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch ops deterministic so the hard-coded image-slice expectations
# in the tests below are reproducible across runs.
enable_full_determinism()
@skip_mps
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = StableDiffusionPanoramaPipeline
a = TEXT_TO_IMAGE_PARAMS
a = TEXT_TO_IMAGE_BATCH_PARAMS
a = TEXT_TO_IMAGE_IMAGE_PARAMS
a = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self: Optional[Any] ):
torch.manual_seed(0 )
lowerCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowerCamelCase__ : Dict = DDIMScheduler()
torch.manual_seed(0 )
lowerCamelCase__ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase__ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowerCamelCase__ : Any = CLIPTextModel(UpperCamelCase__ )
lowerCamelCase__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase__ : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any]=0 ):
lowerCamelCase__ : List[str] = torch.manual_seed(UpperCamelCase__ )
lowerCamelCase__ : str = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : str = self.get_dummy_components()
lowerCamelCase__ : str = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
lowerCamelCase__ : int = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Dict = self.get_dummy_inputs(UpperCamelCase__ )
lowerCamelCase__ : Dict = sd_pipe(**UpperCamelCase__ ).images
lowerCamelCase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : Tuple = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self: Any ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self: Union[str, Any] ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Optional[Any] = self.get_dummy_components()
lowerCamelCase__ : Union[str, Any] = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
lowerCamelCase__ : str = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = """french fries"""
lowerCamelCase__ : Any = sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
lowerCamelCase__ : Any = output.images
lowerCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : Union[str, Any] = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Tuple = self.get_dummy_inputs(UpperCamelCase__ )
lowerCamelCase__ : List[str] = sd_pipe(**UpperCamelCase__ , view_batch_size=2 )
lowerCamelCase__ : Optional[Any] = output.images
lowerCamelCase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : Any = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Dict = self.get_dummy_components()
lowerCamelCase__ : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowerCamelCase__ : Dict = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
lowerCamelCase__ : Any = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = sd_pipe(**UpperCamelCase__ ).images
lowerCamelCase__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : Dict = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# Test: panorama pipeline with a PNDM scheduler (PRK warm-up steps skipped).
# NOTE(review): same machine-renaming caveat as the sibling tests — the
# scheduler/components/locals all share one renamed name here.
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Dict = self.get_dummy_components()
lowerCamelCase__ : Optional[Any] = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=UpperCamelCase__ )
lowerCamelCase__ : List[str] = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
lowerCamelCase__ : List[str] = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = self.get_dummy_inputs(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = sd_pipe(**UpperCamelCase__ ).images
lowerCamelCase__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : str = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# Slow, GPU-only integration tests for StableDiffusionPanoramaPipeline against
# the released "stabilityai/stable-diffusion-2-base" checkpoint.
# NOTE(review): class and method names are machine-renamed (`_lowercase`,
# `lowerCamelCase_`); unittest will not auto-discover methods not named test_*.
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
# Release GPU memory between tests (a tearDown-style hook).
def lowerCamelCase_ ( self: Union[str, Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
# Common call kwargs for the panorama pipeline; the argument seeds the
# torch.Generator for reproducibility.
# NOTE(review): the dict literal references `generator` and the function
# returns `inputs` — the original local names were lost in renaming, so this
# would raise NameError as written.
def lowerCamelCase_ ( self: Any , UpperCamelCase__: Any=0 ):
lowerCamelCase__ : List[str] = torch.manual_seed(UpperCamelCase__ )
lowerCamelCase__ : Dict = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
# Integration test: DDIM-scheduled panorama generation; checks the output
# shape (1, 512, 2048, 3) and a reference pixel slice.
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[Any] = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase__ : str = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder="""scheduler""" )
lowerCamelCase__ : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
lowerCamelCase__ : Union[str, Any] = self.get_inputs()
lowerCamelCase__ : List[Any] = pipe(**UpperCamelCase__ ).images
lowerCamelCase__ : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
lowerCamelCase__ : List[Any] = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
# Integration test: LMSDiscreteScheduler variant. The reference slice is all
# zeros — presumably this checkpoint/scheduler combination produces a black
# corner region; verify against the original test before relying on it.
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Any = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=UpperCamelCase__ )
lowerCamelCase__ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
lowerCamelCase__ : Union[str, Any] = self.get_inputs()
lowerCamelCase__ : Optional[Any] = pipe(**UpperCamelCase__ ).images
lowerCamelCase__ : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
lowerCamelCase__ : str = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
# Integration test: the per-step `callback` is invoked once per denoising step
# and receives intermediate latents of shape (1, 4, 64, 256).
# NOTE(review): `callback_fn` declares two parameters with the same renamed
# name (`UpperCamelCase__`) — duplicate argument names are a SyntaxError; the
# original signature was (step, timestep, latents).
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Union[str, Any] = 0
def callback_fn(UpperCamelCase__: int , UpperCamelCase__: int , UpperCamelCase__: torch.FloatTensor ) -> None:
# Marks the callback as called (originally `callback_fn.has_been_called`).
lowerCamelCase__ : int = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase__ : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCamelCase__ : str = latents[0, -3:, -3:, -1]
lowerCamelCase__ : Union[str, Any] = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase__ : List[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCamelCase__ : Tuple = latents[0, -3:, -3:, -1]
lowerCamelCase__ : int = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Any = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase__ : List[Any] = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder="""scheduler""" )
lowerCamelCase__ : Any = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
lowerCamelCase__ : Any = self.get_inputs()
pipe(**UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
# Integration test: with attention slicing + sequential CPU offload enabled,
# peak GPU memory must stay under ~5.5 GB.
def lowerCamelCase_ ( self: Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__ : str = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase__ : Optional[Any] = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder="""scheduler""" )
lowerCamelCase__ : Any = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
lowerCamelCase__ : Dict = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : int = self.get_inputs()
lowerCamelCase__ : Optional[Any] = pipe(**UpperCamelCase__ )
lowerCamelCase__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 708 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
# Read-only fsspec filesystem exposing a single compressed file as one
# uncompressed entry (base class for the bz2/gzip/lz4/xz/zstd systems below).
# NOTE(review): obfuscation artifacts — the class inherits from its own
# renamed name (originally AbstractArchiveFileSystem), the four distinct class
# attributes (root marker, protocol, compression, extension) were all renamed
# to `a`, and instance attributes (`self.file`, `compressed_name`,
# `uncompressed_name`, `dir_cache`) share one renamed local.
class _lowercase ( _lowercase ):
a = """"""
a = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
a = None # compression type in fsspec. ex: "gzip"
a = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self: str , UpperCamelCase__: str = "" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , **UpperCamelCase__: List[Any] ):
super().__init__(self , **UpperCamelCase__ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase__ : List[Any] = fsspec.open(
UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
# Derive the entry name by stripping the extension from the archive name.
lowerCamelCase__ : str = os.path.basename(self.file.path.split("""::""" )[0] )
lowerCamelCase__ : Union[str, Any] = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
lowerCamelCase__ : Tuple = None
@classmethod
def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase__: Optional[int] ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(UpperCamelCase__ ).lstrip("""/""" )
# Populate the single-entry directory cache lazily.
def lowerCamelCase_ ( self: Tuple ):
if self.dir_cache is None:
lowerCamelCase__ : Dict = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
lowerCamelCase__ : int = {f["""name"""]: f}
# cat(): return the fully decompressed content as bytes.
def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ):
return self.file.open().read()
# _open(): only binary read mode is supported.
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , **UpperCamelCase__: Optional[Any] , ):
lowerCamelCase__ : Union[str, Any] = self._strip_protocol(UpperCamelCase__ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
# bzip2-backed single-file filesystem (protocol "bz2", strips ".bz2").
# NOTE(review): the three distinct class attributes were all renamed to `a`,
# so later assignments overwrite earlier ones.
class _lowercase ( _lowercase ):
a = """bz2"""
a = """bz2"""
a = """.bz2"""
# gzip-backed single-file filesystem (protocol "gzip", strips ".gz").
class _lowercase ( _lowercase ):
a = """gzip"""
a = """gzip"""
a = """.gz"""
# lz4-backed single-file filesystem (protocol "lz4", strips ".lz4").
class _lowercase ( _lowercase ):
a = """lz4"""
a = """lz4"""
a = """.lz4"""
# xz-backed single-file filesystem (protocol "xz", strips ".xz").
class _lowercase ( _lowercase ):
a = """xz"""
a = """xz"""
a = """.xz"""
# zstd-backed single-file filesystem (protocol "zstd", strips ".zst").
# Wraps the opened file so __enter__ returns a close-able proxy, working around
# fsspec 2021.7.0 + zstandard 0.15.2 whose reader has a read-only `close`.
# NOTE(review): `fixed_enter` references `WrappedFile` and `_enter`, whose
# definitions were renamed (`_lowercase` inner class / a shared local) — as
# written these names are unresolved.
class _lowercase ( _lowercase ):
a = """zstd"""
a = """zstd"""
a = """.zst"""
def __init__( self: int , UpperCamelCase__: str , UpperCamelCase__: str = "rb" , UpperCamelCase__: Optional[str] = None , UpperCamelCase__: Optional[dict] = None , UpperCamelCase__: int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__: Dict , ):
super().__init__(
fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase__ : Tuple = self.file.__enter__
# Proxy that delegates everything to the wrapped file object.
class _lowercase :
def __init__( self: Optional[int] , UpperCamelCase__: Any ):
lowerCamelCase__ : Optional[int] = file_
def __enter__( self: List[Any] ):
self._file.__enter__()
return self
def __exit__( self: Any , *UpperCamelCase__: str , **UpperCamelCase__: Any ):
self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )
def __iter__( self: Any ):
return iter(self._file )
def lowerCamelCase_ ( self: List[Any] ):
return next(self._file )
def __getattr__( self: List[str] , UpperCamelCase__: Dict ):
return getattr(self._file , UpperCamelCase__ )
def fixed_enter(*UpperCamelCase__: Union[str, Any] , **UpperCamelCase__: List[str] ):
return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) )
lowerCamelCase__ : Optional[Any] = fixed_enter
| 631 | 0 |
'''simple docstring'''

from typing import TYPE_CHECKING

from ..utils import _LazyModule


# Lazily-exposed public API of the ONNX export sub-package.
# Fixes vs. the previous revision:
#   * the TYPE_CHECKING import used `OnnxSeqaSeqConfigWithPast`, which does not
#     match `OnnxSeq2SeqConfigWithPast` declared in the import structure;
#   * `_LazyModule` was called with an undefined name `_import_structure`
#     (the dict had been renamed to `_A`);
#   * the `Tuple`/`Dict` annotations were evaluated at module level but never
#     imported (NameError) — dropped.
_A = {
    '''config''': [
        '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
        '''OnnxConfig''',
        '''OnnxConfigWithPast''',
        '''OnnxSeq2SeqConfigWithPast''',
        '''PatchingSpec''',
    ],
    '''convert''': ['''export''', '''validate_model_outputs'''],
    '''features''': ['''FeaturesManager'''],
    '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    _A = _LazyModule(__name__, globals()['''__file__'''], _A, module_spec=__spec__)
| 709 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : int =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase) -> argparse.Namespace:
    """Load a YAML config file and return it flattened into an ``argparse.Namespace``.

    Nested mappings become dot-separated attribute names
    (e.g. ``model.classification.name``).

    Fixes vs. the previous revision: the inner helper declared three parameters
    all named ``UpperCamelCase`` (a SyntaxError), every local shared one renamed
    name so the flatten result was discarded, and the error log formatted the
    file path where the exception message belongs.

    Args:
        UpperCamelCase: path to the YAML configuration file.
    """
    print("""Loading config file...""" )

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into {"a.b.c": value} form.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(UpperCamelCase, """r""") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                # Expose every flattened key as a Namespace attribute.
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(UpperCamelCase, str(exc)))
    return config
# Build a MobileViTVaConfig for the given task name (dataset, image size,
# label mapping, optional segmentation head settings) from the original
# (non-HF) config file.
# NOTE(review): every local here was renamed to one shared name, so the values
# computed (num_labels, image size, filename, width multiplier, ASPP settings,
# id2label maps) are never actually stored on `config`; `idalabel` below is
# undefined. The original attribute assignments were lost in obfuscation.
# NOTE(review): the functions' duplicate `UpperCamelCase` parameters make this
# definition a SyntaxError as written.
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCamelCase__ : Union[str, Any] = MobileViTVaConfig()
lowerCamelCase__ : str = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
lowerCamelCase__ : Optional[Any] = 1000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
lowerCamelCase__ : int = 384
else:
lowerCamelCase__ : Optional[int] = 256
lowerCamelCase__ : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
lowerCamelCase__ : Tuple = 21000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
lowerCamelCase__ : str = 384
else:
lowerCamelCase__ : Any = 256
lowerCamelCase__ : int = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
lowerCamelCase__ : Dict = 151
lowerCamelCase__ : str = 512
lowerCamelCase__ : List[Any] = """ade20k-id2label.json"""
lowerCamelCase__ : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
lowerCamelCase__ : Tuple = 21
lowerCamelCase__ : Optional[int] = 512
lowerCamelCase__ : List[Any] = """pascal-voc-id2label.json"""
lowerCamelCase__ : Tuple = True
# orig_config
lowerCamelCase__ : Optional[int] = load_orig_config_file(UpperCamelCase )
assert getattr(UpperCamelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
lowerCamelCase__ : int = getattr(UpperCamelCase , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(UpperCamelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowerCamelCase__ : Any = getattr(UpperCamelCase , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
lowerCamelCase__ : str = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
lowerCamelCase__ : Tuple = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 )
lowerCamelCase__ : List[Any] = getattr(UpperCamelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
lowerCamelCase__ : Tuple = """huggingface/label-files"""
lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase__ : Union[str, Any] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : int = idalabel
lowerCamelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE_ (dct, old_key, new_key) -> None:
    """Rename ``old_key`` to ``new_key`` in ``dct`` in place, preserving the value.

    Fixes vs. the previous revision: the three parameters all shared one name
    (a SyntaxError) and the popped value was never written back under the new
    key; the ``-> Any`` annotation referenced an unimported name.
    """
    val = dct.pop(old_key)
    dct[new_key] = val
def SCREAMING_SNAKE_CASE_ (state_dict, base_model=False) -> list:
    """Build (source_key, dest_key) pairs mapping original MobileViTV2 checkpoint
    keys to the Hugging Face model's parameter names.

    Fixes vs. the previous revision: both parameters shared one name (a
    SyntaxError), every chained ``replace`` result was discarded into a single
    renamed local, and ``model_prefix``/``rename_keys`` were never bound.

    Args:
        state_dict: the original checkpoint state dict (only keys are read).
        base_model: if True, omit the ``mobilevitv2.`` prefix (bare backbone).

    Returns:
        list of ``(old_key, new_key)`` tuples.
    """
    # Bare backbone checkpoints carry no "mobilevitv2." prefix on the HF side.
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # Drop the leading "encoder." segment, if any.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        # Generic sub-module renames.
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        # Stem convolution.
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")

        # Plain MobileNet-style stages 1-2.
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        # MobileViT stages 3-5: downsampling + local-representation convolutions.
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        # Transformer blocks inside each MobileViT stage; stage i has len(j_in) layers.
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    # The trailing global_rep entry is the post-transformer LayerNorm.
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        # Attention / feed-forward internals.
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        # Classification / segmentation heads.
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def SCREAMING_SNAKE_CASE_ (UpperCamelCase) -> None:
    """Drop auxiliary segmentation-head weights (unused by the HF model) in place.

    Fixes vs. the previous revision: the body referenced an undefined name
    ``state_dict`` instead of the parameter, appended the whole dict argument
    instead of the matching key, and popped the wrong key.

    Args:
        UpperCamelCase: the checkpoint state dict to prune.
    """
    # Collect first, then pop, so the dict is never mutated while iterating it.
    keys_to_ignore = [k for k in UpperCamelCase.keys() if k.startswith("""seg_head.aux_head.""")]
    for k in keys_to_ignore:
        UpperCamelCase.pop(k, None)
def SCREAMING_SNAKE_CASE_ ():
    """Download the standard COCO sanity-check image (two cats on a couch).

    Fixes vs. the previous revision: the zero-argument function referenced an
    undefined name ``UpperCamelCase`` for both the URL and the ``stream`` flag.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    # stream=True lets PIL read straight from the response body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
# Convert an original MobileViTV2 checkpoint to the HF format: build the
# config, instantiate the right model class, remap state-dict keys, sanity
# check on a sample image, then save model + image processor.
# NOTE(review): duplicate `UpperCamelCase` parameters make this a SyntaxError
# as written, and the renamed shared locals mean `checkpoint`/`model`/
# `image_processor` etc. are unresolved — the original distinct names
# (task_name, checkpoint path, config path, dump folder) were lost.
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCamelCase__ : str = get_mobilevitva_config(UpperCamelCase , UpperCamelCase )
# load original state_dict
lowerCamelCase__ : List[str] = torch.load(UpperCamelCase , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation(UpperCamelCase ).eval()
lowerCamelCase__ : Tuple = False
else:
lowerCamelCase__ : int = MobileViTVaForImageClassification(UpperCamelCase ).eval()
lowerCamelCase__ : Optional[Any] = False
# remove and rename some keys of load the original model
lowerCamelCase__ : Tuple = checkpoint
remove_unused_keys(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = create_rename_keys(UpperCamelCase , base_model=UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# load modified state_dict
model.load_state_dict(UpperCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase__ : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase__ : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCamelCase__ : str = model(**UpperCamelCase )
# verify classification model
if task_name.startswith("""imagenet""" ):
lowerCamelCase__ : Dict = outputs.logits
lowerCamelCase__ : Optional[Any] = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowerCamelCase__ : Optional[Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
assert torch.allclose(logits[0, :3] , UpperCamelCase , atol=1E-4 )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase )
# CLI entry point: parse the task name, original checkpoint/config paths and
# output folder, then run the conversion.
# NOTE(review): `convert_mobilevitva_checkpoint` is never defined in this file
# (the conversion function above was renamed to SCREAMING_SNAKE_CASE_), so the
# final call would raise NameError as written.
if __name__ == "__main__":
    _A : Optional[int] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''',
        default='''imagenet1k_256''',
        type=str,
        help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
            '''
            Classification (ImageNet-1k)
                - MobileViTV2 (256x256) : imagenet1k_256
                - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
                  imagenet21k_to_1k_256
                - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                  ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
                - ADE20K Dataset : ade20k_deeplabv3
                - Pascal VOC 2012 Dataset: voc_deeplabv3
        '''
        ),
        choices=[
            '''imagenet1k_256''',
            '''imagenet1k_384''',
            '''imagenet21k_to_1k_256''',
            '''imagenet21k_to_1k_384''',
            '''ade20k_deeplabv3''',
            '''voc_deeplabv3''',
        ],
    )
    parser.add_argument(
        '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    _A : Dict =parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 631 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Tuple =logging.get_logger(__name__)
_A : str ={
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
# Wav2Vec2-style model configuration (hidden sizes, conv feature encoder,
# SpecAugment masking, quantizer, CTC, adapter and XVector settings).
# NOTE(review): obfuscation artifacts — the base class was renamed to the same
# `_lowercase` token, every `__init__` parameter is named `UpperCamelCase__`
# (duplicate argument names are a SyntaxError), and the body reads the original
# parameter names (hidden_size, vocab_size, ...) which are therefore unbound.
# The assignments below document which config field each original parameter
# populated.
class _lowercase ( _lowercase ):
a = """wav2vec2"""
def __init__( self: Union[str, Any] , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=768 , UpperCamelCase__: Union[str, Any]=12 , UpperCamelCase__: Any=12 , UpperCamelCase__: Optional[Any]=3_072 , UpperCamelCase__: Optional[Any]="gelu" , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Tuple=0.1 , UpperCamelCase__: Union[str, Any]=0.0 , UpperCamelCase__: Optional[int]=0.0 , UpperCamelCase__: Optional[Any]=0.1 , UpperCamelCase__: Dict=0.1 , UpperCamelCase__: Optional[int]=0.02 , UpperCamelCase__: str=1e-5 , UpperCamelCase__: int="group" , UpperCamelCase__: Optional[int]="gelu" , UpperCamelCase__: List[str]=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase__: Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__: Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=128 , UpperCamelCase__: Union[str, Any]=16 , UpperCamelCase__: str=False , UpperCamelCase__: List[str]=True , UpperCamelCase__: List[Any]=0.05 , UpperCamelCase__: Union[str, Any]=10 , UpperCamelCase__: Union[str, Any]=2 , UpperCamelCase__: int=0.0 , UpperCamelCase__: Union[str, Any]=10 , UpperCamelCase__: Optional[int]=0 , UpperCamelCase__: List[str]=320 , UpperCamelCase__: Tuple=2 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Tuple=100 , UpperCamelCase__: Tuple=256 , UpperCamelCase__: Tuple=256 , UpperCamelCase__: Tuple=0.1 , UpperCamelCase__: Dict="sum" , UpperCamelCase__: str=False , UpperCamelCase__: Optional[Any]=False , UpperCamelCase__: int=256 , UpperCamelCase__: List[str]=(512, 512, 512, 512, 1_500) , UpperCamelCase__: List[str]=(5, 3, 3, 1, 1) , UpperCamelCase__: Union[str, Any]=(1, 2, 3, 1, 1) , UpperCamelCase__: str=512 , UpperCamelCase__: Optional[Any]=0 , UpperCamelCase__: Tuple=1 , UpperCamelCase__: Any=2 , UpperCamelCase__: Any=False , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Union[str, Any]=2 , UpperCamelCase__: Optional[Any]=3 , UpperCamelCase__: str=None , UpperCamelCase__: 
Tuple=None , **UpperCamelCase__: Dict , ):
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
# Transformer encoder dimensions and conv feature-encoder layout.
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : Optional[Any] = feat_extract_norm
lowerCamelCase__ : Union[str, Any] = feat_extract_activation
lowerCamelCase__ : List[Any] = list(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = list(UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = list(UpperCamelCase__ )
lowerCamelCase__ : Tuple = conv_bias
lowerCamelCase__ : Optional[Any] = num_conv_pos_embeddings
lowerCamelCase__ : Dict = num_conv_pos_embedding_groups
lowerCamelCase__ : Dict = len(self.conv_dim )
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : List[Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : int = num_attention_heads
# Dropout probabilities.
lowerCamelCase__ : List[Any] = hidden_dropout
lowerCamelCase__ : List[Any] = attention_dropout
lowerCamelCase__ : Dict = activation_dropout
lowerCamelCase__ : Optional[Any] = feat_proj_dropout
lowerCamelCase__ : List[Any] = final_dropout
lowerCamelCase__ : Optional[int] = layerdrop
lowerCamelCase__ : Dict = layer_norm_eps
lowerCamelCase__ : Any = initializer_range
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : Any = do_stable_layer_norm
lowerCamelCase__ : List[str] = use_weighted_layer_sum
# The three conv-encoder tuples must describe the same number of layers.
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__ : str = apply_spec_augment
lowerCamelCase__ : int = mask_time_prob
lowerCamelCase__ : Any = mask_time_length
lowerCamelCase__ : Optional[int] = mask_time_min_masks
lowerCamelCase__ : List[str] = mask_feature_prob
lowerCamelCase__ : List[str] = mask_feature_length
lowerCamelCase__ : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase__ : Any = num_codevectors_per_group
lowerCamelCase__ : Tuple = num_codevector_groups
lowerCamelCase__ : str = contrastive_logits_temperature
lowerCamelCase__ : Optional[Any] = feat_quantizer_dropout
lowerCamelCase__ : Union[str, Any] = num_negatives
lowerCamelCase__ : List[Any] = codevector_dim
lowerCamelCase__ : Union[str, Any] = proj_codevector_dim
lowerCamelCase__ : List[Any] = diversity_loss_weight
# ctc loss
lowerCamelCase__ : Dict = ctc_loss_reduction
lowerCamelCase__ : Any = ctc_zero_infinity
# adapter
lowerCamelCase__ : str = add_adapter
lowerCamelCase__ : str = adapter_kernel_size
lowerCamelCase__ : List[str] = adapter_stride
lowerCamelCase__ : Dict = num_adapter_layers
lowerCamelCase__ : Dict = output_hidden_size or hidden_size
lowerCamelCase__ : Optional[Any] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCamelCase__ : Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCamelCase__ : Optional[int] = list(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = list(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = list(UpperCamelCase__ )
lowerCamelCase__ : int = xvector_output_dim
@property
def lowerCamelCase_ ( self: str ):
    """Total downsampling stride of the convolutional feature encoder.

    Computed as the product of all entries in ``self.conv_stride``.
    """
    # Fix: the previous revision had a stray "| 710 |" dataset-table artifact
    # fused onto this return line, which made it a SyntaxError.
    return functools.reduce(operator.mul , self.conv_stride , 1 )
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
# Config tester: a MobileViTVaConfig built from the test inputs must expose a
# `width_multiplier` attribute.
class _lowercase ( _lowercase ):
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) )
class _lowercase :
# Model-tester constructor: records the synthetic-batch hyperparameters used
# by the MobileViTV2 unit tests (batch/image/patch sizes, dropouts, labels).
# NOTE(review): every parameter shares the renamed name `UpperCamelCase__`
# (duplicate argument names are a SyntaxError); the right-hand sides read the
# original parameter names, which are therefore unbound here.
def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ):
lowerCamelCase__ : Any = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : str = patch_size
lowerCamelCase__ : Optional[int] = num_channels
# Hidden size scales with the width multiplier, rounded to a multiple of 8.
lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Any = conv_kernel_size
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : Optional[Any] = is_training
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : List[Any] = scope
lowerCamelCase__ : Tuple = width_multiplier
lowerCamelCase__ : List[Any] = ffn_dropout
lowerCamelCase__ : Any = attn_dropout
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: List[Any] ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : str = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs
lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """Common-test harness for MobileViTV2 model, classification and segmentation heads."""

    # NOTE(review): the attributes below all rebind the same name `a`; only the last
    # binding survives at runtime. Upstream these are distinct attributes
    # (all_model_classes, pipeline_model_mapping, and four skip flags) — confirm.
    a = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    a = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    a = False
    a = False
    a = False
    a = False
    def lowerCamelCase_ ( self: Optional[int] ):
        # Set up the model tester and the config tester used by the tests below.
        # NOTE(review): `UpperCamelCase__` is undefined at module level here — upstream
        # passes config_class=MobileViTVaConfig, has_text_modality=False; confirm.
        lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
        lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
    def lowerCamelCase_ ( self: Tuple ):
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def lowerCamelCase_ ( self: List[str] ):
        pass
    @unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def lowerCamelCase_ ( self: int ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self: Tuple ):
        pass
    def lowerCamelCase_ ( self: Tuple ):
        # Every model's forward signature must start with `pixel_values`.
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCamelCase__ : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def lowerCamelCase_ ( self: str ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: List[str] ):
        # Hidden states: 5 feature maps, spatial size halved at each stage.
        def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = outputs.hidden_states
            lowerCamelCase__ : List[Any] = 5
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCamelCase__ : int = 2
            for i in range(len(UpperCamelCase__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : str = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Dict ):
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Smoke-test loading the first published checkpoint.
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Optional[int]:
    """Load and return the COCO fixture image used by the integration tests."""
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration tests: run real MobileViTV2 checkpoints and pin expected outputs."""

    @cached_property
    def lowerCamelCase_ ( self: Tuple ):
        # Image processor for the classification checkpoint (None without vision deps).
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
            if is_vision_available()
            else None
        )
    @slow
    def lowerCamelCase_ ( self: Any ):
        # Image classification: logits shape (1, 1000) and pinned first-3 values.
        # NOTE(review): `UpperCamelCase__` here stands for the device / intermediate
        # variables in the upstream test — undefined as written; confirm upstream.
        lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = self.default_image_processor
        lowerCamelCase__ : List[Any] = prepare_img()
        lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : int = model(**UpperCamelCase__ )
        # verify the logits
        lowerCamelCase__ : str = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
    @slow
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # Semantic segmentation: logits shape (1, 21, 32, 32) and a pinned 3x3x3 corner.
        lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Union[str, Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
        lowerCamelCase__ : str = outputs.logits
        # verify the logits
        lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , UpperCamelCase__ )
        lowerCamelCase__ : Any = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ] , device=UpperCamelCase__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
    @slow
    def lowerCamelCase_ ( self: Optional[int] ):
        # Post-processing: resized (50, 60) and native (32, 32) segmentation maps.
        lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        lowerCamelCase__ : Optional[Any] = prepare_img()
        lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
        lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
        lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
        lowerCamelCase__ : int = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        lowerCamelCase__ : int = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 631 | 0 |
import random
def SCREAMING_SNAKE_CASE_ (vertices_number , probability , directed = False ) -> dict:
    """Generate a random graph as an adjacency dict.

    Each pair of distinct nodes is connected with the given probability
    (Erdos-Renyi style). Fixes: the previous signature declared all three
    parameters with the same name (a SyntaxError) and appended an undefined
    name instead of the neighbour index.

    Args:
        vertices_number: number of nodes; keys are 0 .. vertices_number - 1.
        probability: per-pair chance of creating an edge.
        directed: if False (default), every edge i -> j is mirrored as j -> i.

    Returns:
        dict mapping each node to the list of its neighbours.
    """
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes, add the edge i -> j with the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, mirror the edge as j -> i
                    graph[j].append(i)
    return graph
def SCREAMING_SNAKE_CASE_ (vertices_number ) -> dict:
    """Return the complete graph on `vertices_number` nodes as an adjacency dict.

    Every node is connected to every other node; no self loops.
    """
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


# The random-graph generator above calls this builder as `complete_graph`, a name
# that previously never existed (NameError). Expose it as a backward-compatible alias.
complete_graph = SCREAMING_SNAKE_CASE_
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 711 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# NOTE(review): all constants below rebind the same name `_A`; upstream they are
# distinct (logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) — confirm against the original module.
_A : Optional[Any] =logging.get_logger(__name__)
# File names expected inside a saved tokenizer directory.
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Hub location of the pretrained fast-tokenizer file.
_A : Tuple ={
    '''tokenizer_file''': {
        '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
    },
}
# Maximum model input length (size of the positional embeddings).
_A : List[Any] ={
    '''gpt-neox-20b''': 2_048,
}
class _lowercase ( _lowercase ):
    """Fast (Rust-backed) byte-level BPE tokenizer for GPT-NeoX-20B.

    Fix: the previous ``__init__`` declared every parameter with the same name,
    which is a SyntaxError, and the bodies referenced undefined names; distinct
    parameter names are restored below.
    """

    # NOTE(review): these four attributes all rebind `a` (upstream: vocab_files_names,
    # pretrained maps, model_input_names); left byte-identical to keep the interface.
    a = VOCAB_FILES_NAMES
    a = PRETRAINED_VOCAB_FILES_MAP
    a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Re-configure the backend pre-tokenizer when its serialized
        # add_prefix_space flag disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    # NOTE(review): the two methods below share the name `lowerCamelCase_`, so only
    # the last binding survives; upstream they are `save_vocabulary` and
    # `_build_conversation_input_ids` — names kept to preserve the visible interface.
    def lowerCamelCase_ ( self: int , save_directory: str , filename_prefix: Optional[str] = None ):
        """Save the backend vocabulary files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def lowerCamelCase_ ( self: Optional[int] , conversation: "Conversation" ):
        """Flatten a Conversation into input ids, truncated to model_max_length from the left."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            # Each turn is encoded without special tokens and terminated with EOS.
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 631 | 0 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowercase ( unittest.TestCase ):
    """Jukebox tokenizer tests: pin exact token ids for fixed artist/genre/lyrics metadata."""

    # NOTE(review): both attributes rebind `a` (upstream: tokenizer_class and metas);
    # the methods below read `self.metas`, which does not exist as written — confirm.
    a = JukeboxTokenizer
    a = {
        """artist""": """Zac Brown Band""",
        """genres""": """Country""",
        """lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
    }
    @require_torch
    def lowerCamelCase_ ( self: str ):
        # 1b-lyrics checkpoint: three prior levels, hence three expected id tensors.
        import torch
        lowerCamelCase__ : Optional[int] = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
        lowerCamelCase__ : Tuple = tokenizer(**self.metas )["""input_ids"""]
        # fmt: off
        lowerCamelCase__ : List[str] = [
            torch.tensor([[
                0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]] ),
            torch.tensor([[0, 0, 0, 1_069, 11]] ),
            torch.tensor([[0, 0, 0, 1_069, 11]] ),
        ]
        # fmt: on
        # NOTE(review): `EXPECTED_OUTPUT` / `tokens` are the upstream names for the
        # two obfuscated locals above — undefined as written; confirm upstream.
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def lowerCamelCase_ ( self: Optional[Any] ):
        # 5b-lyrics checkpoint: same lyrics, different vocabulary offsets and padding.
        import torch
        lowerCamelCase__ : str = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
        lowerCamelCase__ : Tuple = tokenizer(**self.metas )["""input_ids"""]
        # fmt: off
        lowerCamelCase__ : Any = [
            torch.tensor([[
                0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]] ),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the GIT model: submodule name -> public names.
# Fixes: the structure dict was previously bound to `_A` and then clobbered by the
# torch branch's list; `_LazyModule` received an undefined `_import_structure`; and
# the resulting lazy module was bound to `_A` instead of replacing this module.
_import_structure = {
    '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
    '''processing_git''': ['''GitProcessor'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: also expose the modeling classes.
    _import_structure['''modeling_git'''] = [
        '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GitForCausalLM''',
        '''GitModel''',
        '''GitPreTrainedModel''',
        '''GitVisionModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 631 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for XGLM: submodule name -> public names.
# Fixes: every optional-dependency branch previously clobbered the same `_A`
# binding with a bare list, and `_LazyModule` received an undefined
# `_import_structure`; entries are now added to the structure dict instead.
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_xglm'''] = [
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
        '''FlaxXGLMForCausalLM''',
        '''FlaxXGLMModel''',
        '''FlaxXGLMPreTrainedModel''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
        '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXGLMForCausalLM''',
        '''TFXGLMModel''',
        '''TFXGLMPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports, guarded the same way.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 713 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): all constants below rebind the same name `_A`; upstream they are
# distinct (SAMPLE_SP, mock tokenizer config, zh code token, org name, framework) — confirm.
_A : int =get_tests_dir('''fixtures/test_sentencepiece.model''')
# Default source/target language config used by the tests below.
_A : Tuple ={'''target_lang''': '''fi''', '''source_lang''': '''en'''}
# Language-code token prepended for multilingual Marian models.
_A : int ='''>>zh<<'''
_A : Dict ='''Helsinki-NLP/'''
# Pick the tensor framework for BatchEncoding return types, by availability.
if is_torch_available():
    _A : List[Any] ='''pt'''
elif is_tf_available():
    _A : Optional[int] ='''tf'''
else:
    _A : Dict ='''jax'''
@require_sentencepiece
class _lowercase ( _lowercase , unittest.TestCase ):
a = MarianTokenizer
a = False
a = True
def lowerCamelCase_ ( self: List[str] ):
super().setUp()
lowerCamelCase__ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
lowerCamelCase__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
lowerCamelCase__ : Optional[int] = Path(self.tmpdirname )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
lowerCamelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self: Optional[Any] , **UpperCamelCase__: Any ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] ):
return (
"This is a test",
"This is a test",
)
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Any = """</s>"""
lowerCamelCase__ : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 9 )
def lowerCamelCase_ ( self: int ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
lowerCamelCase__ : Optional[int] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] )
lowerCamelCase__ : List[str] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCamelCase__ )
MarianTokenizer.from_pretrained(UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Any = tok(
["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : str = self.get_tokenizer()
lowerCamelCase__ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        """Slow integration check: the pinned opus-mt-en-de revision must produce
        exactly this encoding (ids + attention mask) for the reference inputs.

        NOTE(review): `expected_encoding` was mangled to a throwaway
        `lowerCamelCase__` name and is passed below via an unbound
        `UpperCamelCase__`; restore the binding before running.
        """
        # fmt: off
        lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 
58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    def lowerCamelCase_ ( self: List[str] ):
        """A two-vocab Marian tokenizer encodes source and target text with
        different vocabularies, and decodes target ids back to the target text.

        NOTE(review): the locals (`tokenizer`, source/target strings, expected
        id lists, produced ids, decoded text) were all mangled to
        `lowerCamelCase__`, and the assertions compare unbound
        `UpperCamelCase__` placeholders; restore the bindings before running.
        """
        lowerCamelCase__ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        lowerCamelCase__ : str = """Tämä on testi"""
        lowerCamelCase__ : Any = """This is a test"""
        lowerCamelCase__ : int = [76, 7, 2_047, 2]
        lowerCamelCase__ : List[str] = [69, 12, 11, 940, 2]
        lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Union[str, Any] = tokenizer(text_target=UpperCamelCase__ ).input_ids
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 631 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_A : List[Any] =logging.get_logger(__name__)
def squared_euclidean_distance(a, b) -> np.ndarray:
    """Pairwise squared Euclidean distances between the rows of ``a`` and ``b``.

    Restored from the mangled dump: the def had two parameters both named
    ``UpperCamelCase`` (a SyntaxError) and all locals were clobbered. The name
    ``squared_euclidean_distance`` is the one the in-file caller uses.

    a: array of shape (n, d).
    b: array of shape (m, d).
    Returns: array of shape (n, m) with d[i, j] = ||a_i - b_j||^2.
    """
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, broadcast over the grid
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters) -> np.ndarray:
    """Assign every RGB pixel in ``x`` to its nearest entry of ``clusters``.

    Restored from the mangled dump (duplicate parameter names were a
    SyntaxError); the name ``color_quantize`` is what the image processor
    below calls.

    x: array whose trailing dimension is 3 (RGB); flattened to (n_pixels, 3).
    clusters: palette array of shape (n_clusters, 3).
    Returns: int array of shape (n_pixels,) of nearest-cluster indices.
    """
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class _lowercase ( _lowercase ):
    """ImageGPT-style image processor: resize, rescale pixels to [-1, 1] and
    (optionally) colour-quantize each pixel against a palette of ``clusters``.

    NOTE(review): this dump is machine-mangled. Every ``lowerCamelCase__ = ...``
    assignment in ``__init__`` was presumably ``self.<attr> = ...`` — later
    methods read ``self.do_resize`` / ``self.clusters`` etc. that are never
    set here — and each ``def`` has duplicate parameter names (a SyntaxError);
    the real parameter names are evidenced by the reads in each body.
    """

    # NOTE(review): presumably mangled from `model_input_names = ["pixel_values"]` — confirm.
    a = ["""pixel_values"""]
    def __init__( self: List[str] , UpperCamelCase__: Optional[Union[List[List[int]], np.ndarray]] = None , UpperCamelCase__: bool = True , UpperCamelCase__: Dict[str, int] = None , UpperCamelCase__: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__: bool = True , UpperCamelCase__: bool = True , **UpperCamelCase__: int , ):
        """Store preprocessing defaults: clusters, do_resize, size, resample,
        do_normalize, do_color_quantize (names read by the body below)."""
        super().__init__(**UpperCamelCase__ )
        # default target size is 256x256 when none is given
        lowerCamelCase__ : Any = size if size is not None else {"""height""": 256, """width""": 256}
        lowerCamelCase__ : List[str] = get_size_dict(UpperCamelCase__ )
        # the palette is held as an ndarray (n_clusters, 3), or None
        lowerCamelCase__ : Tuple = np.array(UpperCamelCase__ ) if clusters is not None else None
        lowerCamelCase__ : Optional[Any] = do_resize
        lowerCamelCase__ : List[str] = size
        lowerCamelCase__ : Tuple = resample
        lowerCamelCase__ : str = do_normalize
        lowerCamelCase__ : Tuple = do_color_quantize
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: np.ndarray , UpperCamelCase__: Dict[str, int] , UpperCamelCase__: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__: List[Any] , ):
        """Resize one image to ``size`` ({"height": h, "width": w}); raises
        ValueError when either key is missing."""
        lowerCamelCase__ : Tuple = get_size_dict(UpperCamelCase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
        return resize(
            UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: np.ndarray , UpperCamelCase__: Optional[Union[str, ChannelDimension]] = None , ):
        """Normalize pixels to [-1, 1]: divide by 127.5, then subtract 1.

        NOTE(review): both assignments should bind `image` (the second line
        reads and returns it); the receivers were mangled away.
        """
        lowerCamelCase__ : Tuple = rescale(image=UpperCamelCase__ , scale=1 / 127.5 , data_format=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = image - 1
        return image
    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: ImageInput , UpperCamelCase__: bool = None , UpperCamelCase__: Dict[str, int] = None , UpperCamelCase__: PILImageResampling = None , UpperCamelCase__: bool = None , UpperCamelCase__: Optional[bool] = None , UpperCamelCase__: Optional[Union[List[List[int]], np.ndarray]] = None , UpperCamelCase__: Optional[Union[str, TensorType]] = None , UpperCamelCase__: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **UpperCamelCase__: Any , ):
        """Full preprocessing pipeline: resolve per-call overrides against the
        instance defaults, validate, then resize / normalize / colour-quantize
        and return a BatchFeature keyed ``input_ids``."""
        lowerCamelCase__ : Tuple = do_resize if do_resize is not None else self.do_resize
        lowerCamelCase__ : str = size if size is not None else self.size
        lowerCamelCase__ : Any = get_size_dict(UpperCamelCase__ )
        lowerCamelCase__ : List[str] = resample if resample is not None else self.resample
        lowerCamelCase__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
        lowerCamelCase__ : Optional[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        lowerCamelCase__ : Optional[Any] = clusters if clusters is not None else self.clusters
        lowerCamelCase__ : Any = np.array(UpperCamelCase__ )
        lowerCamelCase__ : str = make_list_of_images(UpperCamelCase__ )
        if not valid_images(UpperCamelCase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # NOTE(review): `and` binds tighter than `or`, so this raises whenever
        # resample is None even if do_resize is False — probably wants
        # `if do_resize and (size is None or resample is None):`.
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_color_quantize and clusters is None:
            raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
        # All transformations expect numpy arrays.
        lowerCamelCase__ : Optional[Any] = [to_numpy_array(UpperCamelCase__ ) for image in images]
        if do_resize:
            lowerCamelCase__ : Tuple = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
        if do_normalize:
            lowerCamelCase__ : Dict = [self.normalize(image=UpperCamelCase__ ) for image in images]
        if do_color_quantize:
            lowerCamelCase__ : Any = [to_channel_dimension_format(UpperCamelCase__ , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            lowerCamelCase__ : Any = np.array(UpperCamelCase__ )
            lowerCamelCase__ : Optional[Any] = color_quantize(UpperCamelCase__ , UpperCamelCase__ ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            lowerCamelCase__ : Dict = images.shape[0]
            lowerCamelCase__ : Optional[int] = images.reshape(UpperCamelCase__ , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            lowerCamelCase__ : Union[str, Any] = list(UpperCamelCase__ )
        else:
            lowerCamelCase__ : List[Any] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
        lowerCamelCase__ : int = {"""input_ids""": images}
        return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 714 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both assignments below target the same mangled name `_A`, so
# the archive map clobbers the logger. Originally these were
# `logger = logging.get_logger(__name__)` and a
# `RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP` constant — restore distinct names.
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
    """Configuration for RWKV models (restored from a mangled dump of
    `RwkvConfig`); defaults match the RWKV/rwkv-4-169m-pile family.

    Restoration notes: the original dump assigned both class attributes to a
    single name `a` (the second clobbered the first), the `__init__` had all
    parameters named `UpperCamelCase__` (a SyntaxError), and every attribute
    assignment had lost its `self.` receiver.
    """

    model_type = """rwkv"""
    attribute_map = {"""max_position_embeddings""": """context_length"""}

    def __init__(
        self,
        vocab_size=50_277,            # tokenizer vocabulary size
        context_length=1_024,         # max sequence length used in training
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,   # defaults to hidden_size
        intermediate_size=None,       # defaults to 4 * hidden_size
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
| 631 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
# NOTE(review): three distinct module constants were all mangled to `_A`, so
# each clobbers the previous one, and `Prediction` on the last line is unbound.
# Originally: `logger = logging.get_logger(__name__)`,
# `Prediction = Dict[str, Any]`, `Predictions = List[Prediction]`.
_A : Any =logging.get_logger(__name__)
_A : Tuple =Dict[str, Any]
_A : str =List[Prediction]
@add_end_docstrings(_lowercase )
class _lowercase ( _lowercase ):
    """Object-detection pipeline (mangled dump of `ObjectDetectionPipeline`).

    Predicts bounding boxes and class labels for an image, supporting both
    plain object-detection models and LayoutLM-style token classifiers that
    label OCR'd words (the `self.tokenizer is not None` branches).

    NOTE(review): the dump stripped many local bindings — several
    `lowerCamelCase__ = ...` assignments should bind names (`image`,
    `inputs`, `target_size`, `model_outputs`, `postprocess_kwargs`, ...) that
    later lines read; restore before relying on this class.
    """
    def __init__( self: str , *UpperCamelCase__: str , **UpperCamelCase__: int ):
        """Reject the TF framework, require the vision backend and check the
        model is a detection or token-classification architecture."""
        super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
        if self.framework == "tf":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
        requires_backends(self , """vision""" )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def lowerCamelCase_ ( self: List[Any] , **UpperCamelCase__: Any ):
        """Sanitize call kwargs: only `threshold` is consumed, at postprocess
        time. NOTE(review): the body reads `kwargs` (the parameter was mangled)
        and returns `postprocess_kwargs` that was never bound."""
        lowerCamelCase__ : Any = {}
        if "threshold" in kwargs:
            lowerCamelCase__ : Dict = kwargs["""threshold"""]
        return {}, {}, postprocess_kwargs
    def __call__( self: int , *UpperCamelCase__: str , **UpperCamelCase__: int ):
        """Delegate straight to the base Pipeline call machinery."""
        return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] ):
        """Load the image, record its (height, width) as `target_size`, run the
        image processor, and (for the LayoutLM variant) tokenize OCR words."""
        lowerCamelCase__ : List[str] = load_image(UpperCamelCase__ )
        lowerCamelCase__ : Dict = torch.IntTensor([[image.height, image.width]] )
        lowerCamelCase__ : Tuple = self.image_processor(images=[image] , return_tensors="""pt""" )
        if self.tokenizer is not None:
            lowerCamelCase__ : Any = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
        lowerCamelCase__ : int = target_size
        return inputs
    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Any ):
        """Forward pass: pop `target_size` from the inputs, run the model, and
        re-attach `target_size` (and the OCR `bbox`es) to the outputs."""
        lowerCamelCase__ : Optional[int] = model_inputs.pop("""target_size""" )
        lowerCamelCase__ : Optional[int] = self.model(**UpperCamelCase__ )
        lowerCamelCase__ : Optional[int] = outputs.__class__({"""target_size""": target_size, **outputs} )
        if self.tokenizer is not None:
            lowerCamelCase__ : Optional[Any] = model_inputs["""bbox"""]
        return model_outputs
    def lowerCamelCase_ ( self: str , UpperCamelCase__: int , UpperCamelCase__: Optional[int]=0.9 ):
        """Turn model outputs into [{"score", "label", "box"}, ...], keeping
        predictions whose score exceeds `threshold` (default 0.9).

        NOTE(review): `self.model.config.idalabel` looks like a mangled
        `id2label` — confirm against the model config API.
        """
        lowerCamelCase__ : Optional[int] = model_outputs["""target_size"""]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            lowerCamelCase__ : Any = target_size[0].tolist()
            def unnormalize(UpperCamelCase__: Tuple ):
                # OCR boxes are normalized to a 0-1000 grid; rescale to pixels
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_000),
                            (height * bbox[1] / 1_000),
                            (width * bbox[2] / 1_000),
                            (height * bbox[3] / 1_000),
                        ] ) )
            lowerCamelCase__ : Any = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            lowerCamelCase__ : Dict = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
            lowerCamelCase__ : List[str] = [unnormalize(UpperCamelCase__ ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
            lowerCamelCase__ : Union[str, Any] = ["""score""", """label""", """box"""]
            lowerCamelCase__ : Dict = [dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) for vals in zip(scores.tolist() , UpperCamelCase__ , UpperCamelCase__ ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            lowerCamelCase__ : str = self.image_processor.post_process_object_detection(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : Tuple = raw_annotations[0]
            lowerCamelCase__ : str = raw_annotation["""scores"""]
            lowerCamelCase__ : List[Any] = raw_annotation["""labels"""]
            lowerCamelCase__ : List[str] = raw_annotation["""boxes"""]
            lowerCamelCase__ : Any = scores.tolist()
            lowerCamelCase__ : Tuple = [self.model.config.idalabel[label.item()] for label in labels]
            lowerCamelCase__ : List[str] = [self._get_bounding_box(UpperCamelCase__ ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            lowerCamelCase__ : Dict = ["""score""", """label""", """box"""]
            lowerCamelCase__ : Optional[int] = [
                dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
                for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
            ]
        return annotation
    def lowerCamelCase_ ( self: str , UpperCamelCase__: "torch.Tensor" ):
        """Turn an [xmin, ymin, xmax, ymax] tensor into a dict of ints.

        NOTE(review): the tuple unpacking
        `xmin, ymin, xmax, ymax = box.int().tolist()` was collapsed into a
        single mangled assignment, leaving the four names (and `box`) unbound.
        """
        if self.framework != "pt":
            raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
        lowerCamelCase__ : Dict = box.int().tolist()
        lowerCamelCase__ : Optional[Any] = {
            """xmin""": xmin,
            """ymin""": ymin,
            """xmax""": xmax,
            """ymax""": ymax,
        }
        return bbox
| 715 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both constants below were mangled to `_A` (the map clobbers
# the logger). Originally: `logger = logging.get_logger(__name__)` and a
# `ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP`-style constant.
_A : str =logging.get_logger(__name__)
_A : int ={
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
    """Configuration for RoCBert models (restored from a mangled dump of
    `RoCBertConfig`): standard BERT hyper-parameters plus RoCBert's extra
    pronunciation/shape embedding options.

    Restoration notes: the dump named every `__init__` parameter
    `UpperCamelCase__` (duplicate names are a SyntaxError) and stripped the
    `self.` receivers from the attribute assignments; parameter names are
    recovered from the attribute names the body was assigning.
    """

    model_type = """roc_bert"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,      # use the pronunciation embedding channel
        enable_shape=True,              # use the glyph/shape embedding channel
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,              # concat extra embeddings with word embeddings
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
| 631 | 0 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """Map a key from an original EfficientFormer checkpoint to its HF name.

    Restored from the mangled dump: the def had two parameters both named
    ``UpperCamelCase`` (a SyntaxError) and every local binding was clobbered.

    old_name: key in the original state dict.
    num_meta4D_last_stage: number of meta4D blocks in the final stage; block
        indices at or beyond it belong to the meta3D sub-stack.
    Returns: the corresponding key in the HF EfficientFormer state dict.
    """
    new_name = old_name

    if "patch_embed" in old_name:
        # keys look like "patch_embed.<layer>.<param>"
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                # meta3D blocks use LayerNorm/Linear instead of BatchNorm/Conv
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[int]:
    """Re-key a raw EfficientFormer state dict to HF names, in place.

    NOTE(review): mangled dump — the two parameters share one name (a
    SyntaxError; originally `checkpoint, num_meta4D_last_stage`), the loop
    body reads unbound `checkpoint`/`val`, and the write-back line
    `checkpoint[rename_key(key, num_meta4D_last_stage)] = val` was lost, so
    as written this would only drain the dict. Restore before use.
    """
    for key in checkpoint.copy().keys():
        lowerCamelCase__ : List[Any] = checkpoint.pop(UpperCamelCase )
        lowerCamelCase__ : Optional[Any] = val
    return checkpoint
def prepare_img():
    """Download the standard COCO val2017 test image used for conversion checks.

    Restored from the mangled dump: the url local and the `stream=True`
    argument were clobbered to an unbound `UpperCamelCase`; the name
    `prepare_img` is what the conversion function below calls.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    """Convert an original EfficientFormer checkpoint to HF format, verify the
    preprocessing and logits against the reference pipeline, save it, and
    optionally push to the hub.

    Restored from the mangled dump (four parameters shared one name — a
    SyntaxError); the parameter names are dictated by the keyword call in the
    ``__main__`` block of this file.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    # number of meta4D blocks in the last stage (the rest of it is meta3D).
    # NOTE(review): the dump read `config.num_metaad_blocks`; the real
    # EfficientFormerConfig attribute is `num_meta3d_blocks` — confirm.
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"], )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline — must produce the same tensor as the processor
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ] )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''')
    processor.save_pretrained(pytorch_dump_path)
    print(f'''Processor successfuly saved at {pytorch_dump_path}''')

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''', commit_message="Add model", use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''', commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    # Restored: the dump assigned both the parser and the parsed args to a
    # single mangled name `_A`, while the lines below read `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    # pushing is opt-out: default True unless --no-push_to_hub is given
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 716 |
'''simple docstring'''
import sys
import turtle
def get_mid(point_a, point_b) -> tuple[float, float]:
    """Return the midpoint of two 2-D points.

    Restored from the mangled dump: both parameters were named
    ``UpperCamelCase`` (a SyntaxError) and the body averaged the first point
    with itself; `triangle` below calls this as ``get_mid``.
    """
    return (point_a[0] + point_b[0]) / 2, (point_a[1] + point_b[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth) -> None:
    """Draw a Sierpinski triangle recursively with the module-level `my_pen`.

    Restored from the mangled dump: the four parameters shared one name (a
    SyntaxError) and the goto/recursion arguments were clobbered; the
    ``__main__`` block calls this as ``triangle``.

    vertex1/2/3: (x, y) corners of the current triangle.
    depth: remaining recursion depth; 0 stops after drawing the outline.
    """
    # outline the current triangle
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    # recurse into the three corner sub-triangles built from edge midpoints
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    # Restored: the pen and the vertex list were both clobbered into `_A`,
    # while the calls below read the module globals `my_pen` and `vertices`
    # (`my_pen` is also the global `triangle` draws with).
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 | 0 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# NOTE(review): both constants below were mangled to `_A` (256 clobbers the
# logger). Originally: `logger = logging.get_logger(__name__)` and
# `TARGET_FEATURE_LENGTH = 256` — the latter name is read (unbound) in the
# pipeline's __call__ below.
_A : Dict =logging.get_logger(__name__) # pylint: disable=invalid-name
_A : Dict =256
class _lowercase ( _lowercase ):
    """Spectrogram-diffusion pipeline: encodes note tokens plus a continuous
    mel context, then iteratively denoises mel features with a scheduler and
    optionally vocodes them with MelGAN.

    NOTE(review): throughout this class, locals are bound to the placeholder
    name ``lowerCamelCase__`` but later read under other names (``features``,
    ``timesteps``, ``mel`` …), and ``__init__`` repeats the parameter name
    ``UpperCamelCase__`` (a SyntaxError as written). This looks like a
    mechanical rename; verify behavior against the original implementation.
    """
    # Components that are optional at load time.
    a = ["""melgan"""]
    def __init__( self: int , UpperCamelCase__: SpectrogramNotesEncoder , UpperCamelCase__: SpectrogramContEncoder , UpperCamelCase__: TaFilmDecoder , UpperCamelCase__: DDPMScheduler , UpperCamelCase__: OnnxRuntimeModel if is_onnx_available() else Any , ):
        """Register the encoder/decoder/scheduler/melgan sub-modules."""
        super().__init__()
        # From MELGAN
        lowerCamelCase__ : str = math.log(1e-5 ) # Matches MelGAN training.
        lowerCamelCase__ : List[str] = 4.0 # Largest value for most examples
        lowerCamelCase__ : Tuple = 128
        self.register_modules(
            notes_encoder=UpperCamelCase__ , continuous_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ , scheduler=UpperCamelCase__ , melgan=UpperCamelCase__ , )
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int]=(-1.0, 1.0) , UpperCamelCase__: Tuple=False ):
        """scale_features: linearly rescale mel features from the model's
        [min_value, max_value] range into ``output_range``.
        NOTE(review): ``min_out``/``max_out``/``zero_one``/``features`` are
        read but never bound under those names — see class note."""
        lowerCamelCase__ : Any = output_range
        if clip:
            lowerCamelCase__ : Union[str, Any] = torch.clip(UpperCamelCase__ , self.min_value , self.max_value )
        # Scale to [0, 1].
        lowerCamelCase__ : int = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[Any] , UpperCamelCase__: int=(-1.0, 1.0) , UpperCamelCase__: List[str]=False ):
        """scale_to_features: inverse of scale_features — map values in
        ``input_range`` back into [min_value, max_value]."""
        lowerCamelCase__ : str = input_range
        lowerCamelCase__ : Dict = torch.clip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if clip else outputs
        # Scale to [0, 1].
        lowerCamelCase__ : List[Any] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Union[str, Any] ):
        """encode: run the notes encoder (mask = tokens > 0) and the continuous
        encoder, returning [(tokens_encoded, tokens_mask),
        (continuous_encoded, continuous_mask)]."""
        lowerCamelCase__ : Any = input_tokens > 0
        lowerCamelCase__ : List[str] = self.notes_encoder(
            encoder_input_tokens=UpperCamelCase__ , encoder_inputs_mask=UpperCamelCase__ )
        lowerCamelCase__ : Any = self.continuous_encoder(
            encoder_inputs=UpperCamelCase__ , encoder_inputs_mask=UpperCamelCase__ )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Tuple ):
        """decode: normalize ``noise_time`` to a batch-shaped tensor and run
        the decoder over the encodings/masks to get logits."""
        lowerCamelCase__ : Optional[Any] = noise_time
        if not torch.is_tensor(UpperCamelCase__ ):
            lowerCamelCase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(UpperCamelCase__ ) and len(timesteps.shape ) == 0:
            lowerCamelCase__ : Tuple = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        lowerCamelCase__ : List[str] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        lowerCamelCase__ : Tuple = self.decoder(
            encodings_and_masks=UpperCamelCase__ , decoder_input_tokens=UpperCamelCase__ , decoder_noise_time=UpperCamelCase__ )
        return logits
    @torch.no_grad()
    def __call__( self: List[str] , UpperCamelCase__: List[List[int]] , UpperCamelCase__: Optional[torch.Generator] = None , UpperCamelCase__: int = 100 , UpperCamelCase__: bool = True , UpperCamelCase__: str = "numpy" , UpperCamelCase__: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase__: int = 1 , ):
        """Generate audio for a list of token chunks.

        For each chunk: scale the previous mel prediction to [-1, 1], encode
        tokens + context, start from Gaussian noise, run the denoising loop,
        then rescale and accumulate the mel output. Returns a tuple or an
        ``AudioPipelineOutput`` depending on ``return_dict``.
        """
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(UpperCamelCase__ )}.''' )
        lowerCamelCase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
        lowerCamelCase__ : Any = np.zeros([1, 0, self.n_dims] , np.floataa )
        lowerCamelCase__ : List[Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase__ , device=self.device )
        for i, encoder_input_tokens in enumerate(UpperCamelCase__ ):
            if i == 0:
                lowerCamelCase__ : Optional[Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                lowerCamelCase__ : List[Any] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase__ , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                lowerCamelCase__ : Optional[int] = ones
            lowerCamelCase__ : Optional[int] = self.scale_features(
                UpperCamelCase__ , output_range=[-1.0, 1.0] , clip=UpperCamelCase__ )
            lowerCamelCase__ : Optional[Any] = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCamelCase__ , continuous_mask=UpperCamelCase__ , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            lowerCamelCase__ : List[str] = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=UpperCamelCase__ , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(UpperCamelCase__ )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                lowerCamelCase__ : str = self.decode(
                    encodings_and_masks=UpperCamelCase__ , input_tokens=UpperCamelCase__ , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                lowerCamelCase__ : Tuple = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
            lowerCamelCase__ : Tuple = self.scale_to_features(UpperCamelCase__ , input_range=[-1.0, 1.0] )
            lowerCamelCase__ : Optional[Any] = mel[:1]
            lowerCamelCase__ : int = mel.cpu().float().numpy()
            lowerCamelCase__ : Optional[Any] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(UpperCamelCase__ , UpperCamelCase__ )
            logger.info("""Generated segment""" , UpperCamelCase__ )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                """Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                """Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
        if output_type == "numpy":
            lowerCamelCase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
        else:
            lowerCamelCase__ : str = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=UpperCamelCase__ )
| 717 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowercase :
    """Model tester: builds a small ConvBERT config plus random inputs, and
    runs shape checks for each TF ConvBERT head.

    NOTE(review): ``__init__`` ignores its keyword arguments and rebinds the
    placeholder ``lowerCamelCase__`` instead of setting ``self.*`` attributes,
    and many locals are later read under different names — looks like a
    mechanical rename; compare with the upstream TFConvBertModelTester.
    """
    def __init__( self: int , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Union[str, Any]=7 , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=99 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: Any="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: List[str]=16 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[int]=4 , UpperCamelCase__: Union[str, Any]=None , ):
        # Hard-coded small-model hyperparameters (keyword args above are unused).
        lowerCamelCase__ : Dict = parent
        lowerCamelCase__ : Union[str, Any] = 13
        lowerCamelCase__ : Any = 7
        lowerCamelCase__ : int = True
        lowerCamelCase__ : Optional[Any] = True
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : List[str] = True
        lowerCamelCase__ : str = 99
        lowerCamelCase__ : Dict = 384
        lowerCamelCase__ : Optional[Any] = 2
        lowerCamelCase__ : Optional[int] = 4
        lowerCamelCase__ : Optional[Any] = 37
        lowerCamelCase__ : Union[str, Any] = """gelu"""
        lowerCamelCase__ : int = 0.1
        lowerCamelCase__ : Optional[Any] = 0.1
        lowerCamelCase__ : List[Any] = 512
        lowerCamelCase__ : Optional[Any] = 16
        lowerCamelCase__ : Any = 2
        lowerCamelCase__ : Optional[Any] = 0.02
        lowerCamelCase__ : int = 3
        lowerCamelCase__ : List[str] = 4
        lowerCamelCase__ : Any = 128
        lowerCamelCase__ : List[Any] = 2
        lowerCamelCase__ : Optional[Any] = 9
        lowerCamelCase__ : Any = 1
        lowerCamelCase__ : Optional[int] = None
    def lowerCamelCase_ ( self: List[Any] ):
        """prepare_config_and_inputs: random ids/masks/labels + a ConvBertConfig."""
        lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase__ : str = None
        if self.use_input_mask:
            lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase__ : List[str] = None
        if self.use_token_type_ids:
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase__ : int = None
        lowerCamelCase__ : Optional[Any] = None
        lowerCamelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase__ : List[Any] = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: Any ):
        """create_and_check_model: base model output has (batch, seq, hidden) shape."""
        lowerCamelCase__ : List[Any] = TFConvBertModel(config=UpperCamelCase__ )
        lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        lowerCamelCase__ : List[str] = [input_ids, input_mask]
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCamelCase_ ( self: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ):
        """create_and_check_for_masked_lm: logits shape (batch, seq, vocab)."""
        lowerCamelCase__ : int = TFConvBertForMaskedLM(config=UpperCamelCase__ )
        lowerCamelCase__ : Tuple = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : int = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def lowerCamelCase_ ( self: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
        """create_and_check_for_sequence_classification: logits shape (batch, num_labels)."""
        lowerCamelCase__ : int = self.num_labels
        lowerCamelCase__ : Dict = TFConvBertForSequenceClassification(config=UpperCamelCase__ )
        lowerCamelCase__ : Dict = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[str] , UpperCamelCase__: Dict ):
        """create_and_check_for_multiple_choice: inputs tiled per choice; logits (batch, num_choices)."""
        lowerCamelCase__ : Optional[int] = self.num_choices
        lowerCamelCase__ : Dict = TFConvBertForMultipleChoice(config=UpperCamelCase__ )
        lowerCamelCase__ : int = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : List[str] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase__ : Tuple = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: int ):
        """create_and_check_for_token_classification: logits (batch, seq, num_labels)."""
        lowerCamelCase__ : List[Any] = self.num_labels
        lowerCamelCase__ : List[str] = TFConvBertForTokenClassification(config=UpperCamelCase__ )
        lowerCamelCase__ : Optional[Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
        """create_and_check_for_question_answering: start/end logits (batch, seq)."""
        lowerCamelCase__ : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase__ )
        lowerCamelCase__ : int = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def lowerCamelCase_ ( self: Optional[Any] ):
        """prepare_config_and_inputs_for_common: unpack and return (config, inputs_dict).
        NOTE(review): the annotated tuple-unpack below (`(...) : str = ...`) is
        invalid Python syntax and `config_and_inputs` is never bound under that
        name — artifact of the mechanical rename."""
        lowerCamelCase__ : str = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : str = config_and_inputs
        lowerCamelCase__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
    """TF ConvBERT model-level test suite (mixes the model tester and pipeline
    tester mixins).

    NOTE(review): every class attribute below is bound to the same name ``a``
    (only the last survives), and several names (``TFConvBertModelTester``,
    ``UpperCamelCase__`` inside method bodies) are undefined — artifacts of a
    mechanical rename; compare with the upstream TFConvBertModelTest.
    """
    a = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    a = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    a = False
    a = False
    a = False
    def lowerCamelCase_ ( self: str ):
        """setUp: build the model tester and a ConfigTester."""
        lowerCamelCase__ : Dict = TFConvBertModelTester(self )
        lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
    def lowerCamelCase_ ( self: List[str] ):
        # test_config
        self.config_tester.run_common_tests()
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # test_model
        lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any ):
        # test_for_masked_lm
        lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Tuple ):
        # test_for_multiple_choice
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Union[str, Any] ):
        # test_for_question_answering
        lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: Any ):
        # test_for_sequence_classification
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
    def lowerCamelCase_ ( self: str ):
        # test_for_token_classification
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
    @slow
    def lowerCamelCase_ ( self: Optional[Any] ):
        """Saved-model round trip: save each model as a TF SavedModel, reload it
        with Keras, and check hidden-state / attention output shapes."""
        lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Dict = True
        lowerCamelCase__ : Tuple = True
        if hasattr(UpperCamelCase__ , """use_cache""" ):
            lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
                lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" )
                lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ )
                lowerCamelCase__ : Any = model(UpperCamelCase__ )
                if self.is_encoder_decoder:
                    lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""]
                    lowerCamelCase__ : Any = outputs["""encoder_attentions"""]
                else:
                    lowerCamelCase__ : int = outputs["""hidden_states"""]
                    lowerCamelCase__ : Optional[int] = outputs["""attentions"""]
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                lowerCamelCase__ : Union[str, Any] = getattr(
                    self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                # ConvBERT halves the attention heads via its head_ratio, hence / 2.
                self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def lowerCamelCase_ ( self: int ):
        # test_model_from_pretrained
        lowerCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(UpperCamelCase__ )
    def lowerCamelCase_ ( self: List[str] ):
        """Attention-output checks: toggling output_attentions/hidden_states via
        kwargs and config, with encoder/decoder shape helpers."""
        lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ : Union[str, Any] = True
        lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
        def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ):
            # Decoder attentions: one tensor per layer with halved head count.
            lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
            self.assertEqual(out_len % 2 , 0 )
            lowerCamelCase__ : Any = outputs.decoder_attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(UpperCamelCase__: List[str] ):
            # Encoder attentions: same per-layer shape check on the encoder side.
            lowerCamelCase__ : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = True
            lowerCamelCase__ : Any = False
            lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            if self.is_encoder_decoder:
                lowerCamelCase__ : str = model_class(UpperCamelCase__ )
                lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
                self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
                check_decoder_attentions_output(UpperCamelCase__ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowerCamelCase__ : Optional[int] = True
            lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
            lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
            # Check attention is always last and order is fine
            lowerCamelCase__ : List[Any] = True
            lowerCamelCase__ : int = True
            lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
            self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
            check_encoder_attentions_output(UpperCamelCase__ )
@require_tf
class _lowercase ( unittest.TestCase ):
    """Slow integration test for the pretrained ConvBERT base model."""
    @slow
    def lowerCamelCase_ ( self: List[str] ):
        """Run YituTech/conv-bert-base on a six-token input and compare the
        output shape and the first 3x3 slice of the hidden states against
        reference values.

        Fixes a NameError in the original: every local was bound to the
        placeholder ``lowerCamelCase__`` while the code read the undefined
        names ``model``, ``UpperCamelCase__`` and ``output``.
        """
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        # Reference slice recorded from the original PyTorch checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 631 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger and the pretrained-config archive map.
# NOTE(review): both statements bind the same name `_A`, so the logger is
# immediately replaced by the dict — presumably `logger` and
# `CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP` before a mechanical rename.
_A : List[str] =logging.get_logger(__name__)
_A : str ={
    '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
    '''YituTech/conv-bert-medium-small''': (
        '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
    ),
    '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowercase ( _lowercase ):
    """ConvBERT model configuration (model_type "convbert").

    NOTE(review): the assignments in ``__init__`` bind plain locals
    (``lowerCamelCase__``) instead of ``self.*`` attributes, and the signature
    repeats the parameter name ``UpperCamelCase__`` — artifacts of a
    mechanical rename; as written none of the hyperparameters are stored on
    the config instance. Compare with the upstream ConvBertConfig.
    """
    a = """convbert"""
    def __init__( self: Any , UpperCamelCase__: Dict=30_522 , UpperCamelCase__: List[Any]=768 , UpperCamelCase__: Union[str, Any]=12 , UpperCamelCase__: str=12 , UpperCamelCase__: int=3_072 , UpperCamelCase__: Tuple="gelu" , UpperCamelCase__: Optional[Any]=0.1 , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: Dict=512 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=1e-12 , UpperCamelCase__: List[str]=1 , UpperCamelCase__: List[Any]=0 , UpperCamelCase__: Tuple=2 , UpperCamelCase__: int=768 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Tuple=9 , UpperCamelCase__: Optional[int]=1 , UpperCamelCase__: str=None , **UpperCamelCase__: int , ):
        # Forward special-token ids and remaining kwargs to PretrainedConfig.
        super().__init__(
            pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
        lowerCamelCase__ : Tuple = vocab_size
        lowerCamelCase__ : List[Any] = hidden_size
        lowerCamelCase__ : Dict = num_hidden_layers
        lowerCamelCase__ : Optional[Any] = num_attention_heads
        lowerCamelCase__ : List[str] = intermediate_size
        lowerCamelCase__ : Tuple = hidden_act
        lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
        lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
        lowerCamelCase__ : str = max_position_embeddings
        lowerCamelCase__ : List[str] = type_vocab_size
        lowerCamelCase__ : Union[str, Any] = initializer_range
        lowerCamelCase__ : Tuple = layer_norm_eps
        lowerCamelCase__ : Dict = embedding_size
        lowerCamelCase__ : Union[str, Any] = head_ratio
        lowerCamelCase__ : Optional[int] = conv_kernel_size
        lowerCamelCase__ : Any = num_groups
        lowerCamelCase__ : int = classifier_dropout
class _lowercase ( _lowercase ):
    """ONNX export configuration for ConvBERT: declares the dynamic axes of
    the three standard encoder inputs."""
    @property
    def lowerCamelCase_ ( self: str ):
        """Return the ONNX input spec as an ordered name -> dynamic-axes map.

        Multiple-choice tasks carry an extra "choice" axis between batch and
        sequence. Fixes a NameError in the original: the axes dict was bound
        to the placeholder ``lowerCamelCase__`` but read back as
        ``dynamic_axis``.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
| 718 |
'''simple docstring'''
# Package version string.
# NOTE(review): `_A` presumably was `__version__` before a mechanical rename;
# confirm against the upstream accelerate `__init__.py`.
_A : List[str] ='''0.21.0'''
# Public re-exports for the package namespace.
from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)
# rich-based helpers are only importable when the optional `rich` dep exists.
if is_rich_available():
    from .utils import rich
| 631 | 0 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _lowercase ( yaml.SafeLoader ):
    """A yaml.SafeLoader variant intended to reject mappings with duplicate keys.

    NOTE(review): both methods below share the name ``lowerCamelCase_`` (the
    second shadows the first), and the shadowed duplicate-checker is invoked
    under ``_check_no_duplicates_on_constructed_node`` which no longer exists
    — a mechanical rename broke this class. Several locals (``node``,
    ``keys``, ``counter``, ``duplicate_keys``, ``mapping``) are read but never
    bound under those names. Compare with `_NoDuplicateSafeLoader` in
    `datasets`.
    """
    def lowerCamelCase_ ( self: str , UpperCamelCase__: Optional[int] ):
        # Collect constructed keys of a mapping node and fail on duplicates.
        lowerCamelCase__ : int = [self.constructed_objects[key_node] for key_node, _ in node.value]
        lowerCamelCase__ : Optional[Any] = [tuple(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else key for key in keys]
        lowerCamelCase__ : List[Any] = Counter(UpperCamelCase__ )
        lowerCamelCase__ : int = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )
    def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Dict , UpperCamelCase__: List[str]=False ):
        # construct_mapping override: build the mapping, then validate it.
        lowerCamelCase__ : str = super().construct_mapping(UpperCamelCase__ , deep=UpperCamelCase__ )
        self._check_no_duplicates_on_constructed_node(UpperCamelCase__ )
        return mapping
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple[Optional[str], str]:
    """Split a README string into its leading YAML front-matter and body.

    Returns ``(yaml_block, rest)``: ``yaml_block`` is the text between the
    opening ``---`` line and the next ``---`` line, or ``None`` when the
    document has no front matter; ``rest`` is the remaining content.

    Fixes two defects in the original: locals were bound to the placeholder
    name ``lowerCamelCase__`` but read as ``full_content``/``sep_idx``/
    ``yamlblock`` (NameError), and the no-front-matter branch joined the
    characters of the raw input string instead of its lines.
    """
    full_content = list(UpperCamelCase.splitlines() )
    # Front matter exists only when the very first line is '---' and a
    # closing '---' appears on a later line.
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("""---""" ) + 1
        yamlblock = """\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class _lowercase ( _lowercase ):
    """Dict-like dataset-card metadata: reads/writes the YAML front-matter
    block of a dataset README.md.

    NOTE(review): in several methods below, locals are bound to the
    placeholder ``lowerCamelCase__`` but read under other names
    (``yaml_string``, ``readme_content``, ``full_content``), and
    ``to_yaml_string`` passes the undefined ``UpperCamelCase__`` — a
    mechanical rename; as written these methods raise NameError. Compare with
    `DatasetMetadata` in `datasets`.
    """
    # class attributes
    a = {"""train_eval_index"""} # train-eval-index in the YAML metadata
    @classmethod
    def lowerCamelCase_ ( cls: List[str] , UpperCamelCase__: Path ):
        """from_readme: build metadata from a README file's YAML block."""
        with open(UpperCamelCase__ , encoding="""utf-8""" ) as readme_file:
            lowerCamelCase__ : Union[str, Any] = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(UpperCamelCase__ )
        else:
            return cls()
    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Path ):
        """to_readme: write (or splice) the YAML block into a README file."""
        if path.exists():
            with open(UpperCamelCase__ , encoding="""utf-8""" ) as readme_file:
                lowerCamelCase__ : List[str] = readme_file.read()
        else:
            lowerCamelCase__ : Optional[int] = None
        lowerCamelCase__ : Union[str, Any] = self._to_readme(UpperCamelCase__ )
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as readme_file:
            readme_file.write(UpperCamelCase__ )
    def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Optional[str] = None ):
        """_to_readme: prepend '---\\n<yaml>\\n---\\n' to existing content."""
        if readme_content is not None:
            lowerCamelCase__ : Tuple = _split_yaml_from_readme(UpperCamelCase__ )
            lowerCamelCase__ : List[Any] = """---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            lowerCamelCase__ : Tuple = """---\n""" + self.to_yaml_string() + """---\n"""
        return full_content
    @classmethod
    def lowerCamelCase_ ( cls: List[Any] , UpperCamelCase__: str ):
        """from_yaml_string: parse YAML and normalize dashed keys to underscores."""
        lowerCamelCase__ : Tuple = yaml.load(UpperCamelCase__ , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        lowerCamelCase__ : List[str] = {
            (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**UpperCamelCase__ )
    def lowerCamelCase_ ( self: Optional[int] ):
        """to_yaml_string: dump fields back to YAML, restoring dashed keys."""
        return yaml.safe_dump(
            {
                (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=UpperCamelCase__ , allow_unicode=UpperCamelCase__ , encoding="""utf-8""" , ).decode("""utf-8""" )
# Known task-category keys for dataset cards; each maps to a (currently empty)
# list of task templates.
# NOTE(review): `_A` presumably held a descriptive name (e.g. a known-task
# variants map) before a mechanical rename; confirm against upstream.
_A : List[Any] ={
    '''image-classification''': [],
    '''translation''': [],
    '''image-segmentation''': [],
    '''fill-mask''': [],
    '''automatic-speech-recognition''': [],
    '''token-classification''': [],
    '''sentence-similarity''': [],
    '''audio-classification''': [],
    '''question-answering''': [],
    '''summarization''': [],
    '''zero-shot-classification''': [],
    '''table-to-text''': [],
    '''feature-extraction''': [],
    '''other''': [],
    '''multiple-choice''': [],
    '''text-classification''': [],
    '''text-to-image''': [],
    '''text2text-generation''': [],
    '''zero-shot-image-classification''': [],
    '''tabular-classification''': [],
    '''tabular-regression''': [],
    '''image-to-image''': [],
    '''tabular-to-text''': [],
    '''unconditional-image-generation''': [],
    '''text-retrieval''': [],
    '''text-to-speech''': [],
    '''object-detection''': [],
    '''audio-to-audio''': [],
    '''text-generation''': [],
    '''conversational''': [],
    '''table-question-answering''': [],
    '''visual-question-answering''': [],
    '''image-to-text''': [],
    '''reinforcement-learning''': [],
    '''voice-activity-detection''': [],
    '''time-series-forecasting''': [],
    '''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_A : Tuple =ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
_A : int =ap.parse_args()
_A : Optional[Any] =Path(args.readme_filepath)
_A : List[str] =DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and the TrOCR pretrained-config archive map.
# NOTE(review): both statements bind the same name `_A` (map overwrites the
# logger) — presumably `logger` and the archive-map constant before a
# mechanical rename.
_A : Any =logging.get_logger(__name__)
_A : Dict ={
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _lowercase ( _lowercase ):
    """TrOCR decoder configuration (model_type "trocr").

    NOTE(review): the assignments in ``__init__`` bind plain locals
    (``lowerCamelCase__``) instead of ``self.*`` attributes, and the signature
    repeats the parameter name ``UpperCamelCase__`` — artifacts of a
    mechanical rename; as written no hyperparameter is stored on the config
    instance. Compare with the upstream TrOCRConfig.
    """
    a = """trocr"""
    a = ["""past_key_values"""]
    # Attribute-name aliases used by the shared PretrainedConfig machinery.
    a = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }
    def __init__( self: Optional[Any] , UpperCamelCase__: int=50_265 , UpperCamelCase__: int=1_024 , UpperCamelCase__: Optional[Any]=12 , UpperCamelCase__: Dict=16 , UpperCamelCase__: int=4_096 , UpperCamelCase__: Tuple="gelu" , UpperCamelCase__: int=512 , UpperCamelCase__: Dict=0.1 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Any=2 , UpperCamelCase__: Dict=0.02 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: str=True , UpperCamelCase__: Tuple=False , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Tuple=True , UpperCamelCase__: Dict=1 , UpperCamelCase__: List[str]=0 , UpperCamelCase__: Union[str, Any]=2 , **UpperCamelCase__: str , ):
        lowerCamelCase__ : Any = vocab_size
        lowerCamelCase__ : Tuple = d_model
        lowerCamelCase__ : Any = decoder_layers
        lowerCamelCase__ : Dict = decoder_attention_heads
        lowerCamelCase__ : str = decoder_ffn_dim
        lowerCamelCase__ : Tuple = activation_function
        lowerCamelCase__ : Dict = max_position_embeddings
        lowerCamelCase__ : int = dropout
        lowerCamelCase__ : int = attention_dropout
        lowerCamelCase__ : List[Any] = activation_dropout
        lowerCamelCase__ : Union[str, Any] = init_std
        lowerCamelCase__ : Optional[int] = decoder_layerdrop
        lowerCamelCase__ : Dict = use_cache
        lowerCamelCase__ : Any = scale_embedding
        lowerCamelCase__ : Optional[int] = use_learned_position_embeddings
        lowerCamelCase__ : List[str] = layernorm_embedding
        # Forward special-token ids and remaining kwargs to the base config.
        super().__init__(
            pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
| 631 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.