"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> Any:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
lowercase__ : List[str] = os.path.abspath(__lowerCamelCase )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
lowercase__ : List[Any] = torch.load(__lowerCamelCase , map_location='''cpu''' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
lowercase__ : int = convert_pytorch_state_dict_to_flax(__lowerCamelCase , __lowerCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowercase__ : Dict = convert_pytorch_sharded_state_dict_to_flax(__lowerCamelCase , __lowerCamelCase )
return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to corresponding Flax weight names and reshape the tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks if `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
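
# Quick illustration of the renaming rules above (hypothetical keys, not from any real
# checkpoint): a LayerNorm "weight" becomes a Flax "scale", and a 2D Linear "weight" is
# transposed into a Flax "kernel". A minimal sketch only; not called anywhere.
def _rename_examples_sketch():
    random_flax_state_dict = {("encoder", "layer_norm", "scale"): jnp.ones(4)}
    # LayerNorm: PyTorch "weight" -> Flax "scale" (no reshape)
    key, tensor = rename_key_and_reshape_tensor(
        ("encoder", "layer_norm", "weight"), np.ones(4), random_flax_state_dict, "model"
    )
    assert key == ("encoder", "layer_norm", "scale")
    # Linear: PyTorch "weight" of shape (out, in) -> Flax "kernel" of shape (in, out)
    key, tensor = rename_key_and_reshape_tensor(
        ("dense", "weight"), np.ones((8, 4)), random_flax_state_dict, "model"
    )
    assert key == ("dense", "kernel") and tensor.shape == (4, 8)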
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys, values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys, values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
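
# --- Usage sketch (illustrative, not part of the module above) ---
# A minimal round-trip with the helpers defined in this file. The model name and the
# local checkpoint path are assumptions for illustration only; never called here.
def _pt_flax_roundtrip_sketch():
    from transformers import BertModel, FlaxBertModel

    flax_model = FlaxBertModel.from_pretrained("bert-base-uncased")
    flax_model.params = load_pytorch_checkpoint_in_flax_state_dict(
        flax_model, "/path/to/pytorch_model.bin", is_sharded=False
    )

    pt_model = BertModel.from_pretrained("bert-base-uncased")
    return load_flax_weights_in_pytorch_model(pt_model, flax_model.params)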
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = "maskformer"
lowerCAmelCase : Any = {"hidden_size": "mask_feature_size"}
lowerCAmelCase : Optional[int] = ["resnet", "swin"]
lowerCAmelCase : str = ["detr"]
def __init__( self : int ,_snake_case : int = 256 ,_snake_case : int = 256 ,_snake_case : float = 0.1 ,_snake_case : bool = False ,_snake_case : Optional[Dict] = None ,_snake_case : Optional[Dict] = None ,_snake_case : float = 0.02 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 20.0 ,_snake_case : Optional[bool] = None ,**_snake_case : Optional[Any] ,) -> Dict:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase__ : Any = SwinConfig(
image_size=384 ,in_channels=3 ,patch_size=4 ,embed_dim=128 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ,)
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[str] = backbone_config.pop('''model_type''' )
lowercase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(_snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase__ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
lowercase__ : Tuple = (
decoder_config.pop('''model_type''' ) if isinstance(_snake_case ,_snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(_snake_case ,_snake_case ):
lowercase__ : Optional[int] = CONFIG_MAPPING[decoder_type]
lowercase__ : Optional[Any] = config_class.from_dict(_snake_case )
lowercase__ : List[Any] = backbone_config
lowercase__ : List[Any] = decoder_config
# main feature dimension for the model
lowercase__ : List[str] = fpn_feature_size
lowercase__ : int = mask_feature_size
# initializer
lowercase__ : str = init_std
lowercase__ : str = init_xavier_std
# Hungarian matcher && loss
lowercase__ : Optional[int] = cross_entropy_weight
lowercase__ : List[Any] = dice_weight
lowercase__ : List[str] = mask_weight
lowercase__ : str = use_auxiliary_loss
lowercase__ : Optional[int] = no_object_weight
lowercase__ : Optional[Any] = output_auxiliary_logits
lowercase__ : Optional[Any] = self.decoder_config.encoder_attention_heads
lowercase__ : Optional[Any] = self.decoder_config.num_hidden_layers
super().__init__(**_snake_case )
@classmethod
def UpperCAmelCase ( cls : Any ,_snake_case : PretrainedConfig ,_snake_case : PretrainedConfig ,**_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return cls(
backbone_config=_snake_case ,decoder_config=_snake_case ,**_snake_case ,)
def UpperCAmelCase ( self : str ) -> Dict[str, any]:
"""simple docstring"""
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : List[Any] = self.decoder_config.to_dict()
lowercase__ : List[str] = self.__class__.model_type
return output
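
# A minimal sketch of constructing a config with the class above. The smaller Swin
# backbone hyperparameters are illustrative assumptions, not values from any released
# checkpoint; the decoder falls back to the default DetrConfig. Not called here.
def _maskformer_config_sketch():
    backbone = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
    config = MaskFormerConfig(backbone_config=backbone, mask_feature_size=256)
    return config.to_dict()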
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __A ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : Tuple = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') ,up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') ,)
return model
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ : Dict = self.dummy_uncond_unet
lowercase__ : Any = ScoreSdeVeScheduler()
lowercase__ : List[str] = ScoreSdeVePipeline(unet=_snake_case ,scheduler=_snake_case )
sde_ve.to(_snake_case )
sde_ve.set_progress_bar_config(disable=_snake_case )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : Optional[int] = sde_ve(num_inference_steps=2 ,output_type='''numpy''' ,generator=_snake_case ).images
lowercase__ : Optional[Any] = torch.manual_seed(0 )
lowercase__ : str = sde_ve(num_inference_steps=2 ,output_type='''numpy''' ,generator=_snake_case ,return_dict=_snake_case )[
0
]
lowercase__ : Tuple = image[0, -3:, -3:, -1]
lowercase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
lowercase__ : List[str] = '''google/ncsnpp-church-256'''
lowercase__ : List[str] = UNetaDModel.from_pretrained(_snake_case )
lowercase__ : Optional[Any] = ScoreSdeVeScheduler.from_pretrained(_snake_case )
lowercase__ : List[Any] = ScoreSdeVePipeline(unet=_snake_case ,scheduler=_snake_case )
sde_ve.to(_snake_case )
sde_ve.set_progress_bar_config(disable=_snake_case )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : Any = sde_ve(num_inference_steps=10 ,output_type='''numpy''' ,generator=_snake_case ).images
lowercase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowercase__ : Tuple = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
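
# A minimal CPU-friendly sampling sketch mirroring the fast test above; the tiny UNet
# configuration is the dummy test model, an illustrative assumption rather than a
# released checkpoint. Not invoked by the test suite.
def _score_sde_ve_sampling_sketch():
    torch.manual_seed(0)
    unet = UNet2DModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = ScoreSdeVePipeline(unet=unet, scheduler=ScoreSdeVeScheduler())
    images = pipe(num_inference_steps=2, output_type="numpy", generator=torch.manual_seed(0)).images
    return images.shape  # expected: (1, 32, 32, 3)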
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
lowercase__ : int = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowercase__ : Dict = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ : Optional[int] = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ : List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : Dict = [3, 3, 3, 3]
lowercase__ : str = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : List[str] = [4, 4, 4, 4]
lowercase__ : Any = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : List[str] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : List[str] = [3, 3, 3, 3]
else:
lowercase__ : Optional[Any] = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[int] = 96
elif "small" in model_name:
lowercase__ : Union[str, Any] = 96
elif "base" in model_name:
lowercase__ : Tuple = 1_28
elif "large" in model_name:
lowercase__ : Any = 1_92
elif "xlarge" in model_name:
lowercase__ : Any = 2_56
elif "huge" in model_name:
lowercase__ : Union[str, Any] = 3_52
# set label information
lowercase__ : List[Any] = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowercase__ : Optional[int] = '''imagenet-22k-id2label.json'''
else:
lowercase__ : Optional[Any] = '''imagenet-1k-id2label.json'''
lowercase__ : Dict = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
lowercase__ : int = FocalNetConfig(
embed_dim=__lowerCamelCase , depths=__lowerCamelCase , focal_levels=__lowerCamelCase , focal_windows=__lowerCamelCase , use_conv_embed=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , use_post_layernorm=__lowerCamelCase , use_layerscale=__lowerCamelCase , )
return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
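
# Example invocations (illustrative; the script file name and output paths are
# assumptions, not part of the original repository):
#
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted
#
# or programmatically, assuming the checkpoint URL for the chosen name is reachable:
#
#   convert_focalnet_checkpoint("focalnet-tiny", "./focalnet-tiny-converted", push_to_hub=False)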
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = CustomTokenizer
pass
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = ["image_processor", "tokenizer"]
lowerCAmelCase : int = "ChineseCLIPImageProcessor"
lowerCAmelCase : str = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Tuple ,_snake_case : str=None ,_snake_case : Union[str, Any]=None ,**_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,_snake_case ,)
lowercase__ : Tuple = kwargs.pop('''feature_extractor''' )
lowercase__ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case ,_snake_case )
lowercase__ : List[Any] = self.image_processor
def __call__( self : List[Any] ,_snake_case : Optional[int]=None ,_snake_case : Dict=None ,_snake_case : List[Any]=None ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase__ : str = self.tokenizer(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if images is not None:
lowercase__ : str = self.image_processor(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if text is not None and images is not None:
lowercase__ : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) ,tensor_type=_snake_case )
def UpperCAmelCase ( self : Any ,*_snake_case : List[Any] ,**_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ,*_snake_case : Tuple ,**_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@property
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.tokenizer.model_input_names
lowercase__ : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,_snake_case ,)
return self.image_processor_class
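
# A minimal usage sketch for the processor above. The Hub checkpoint name is a common
# Chinese-CLIP checkpoint but should be treated as an assumption here; not called.
def _chinese_clip_processor_sketch():
    from PIL import Image

    from transformers import ChineseCLIPProcessor

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    image = Image.new("RGB", (224, 224))
    inputs = processor(text=["一张小狗的照片"], images=image, return_tensors="pt")  # "a photo of a puppy"
    return sorted(inputs.keys())  # expect input_ids, attention_mask, pixel_values, ...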
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> bool:
lowercase__ : Union[str, Any] = len(__lowerCamelCase ) + 1
lowercase__ : List[str] = len(__lowerCamelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
lowercase__ : List[Any] = [[0 for i in range(__lowerCamelCase )] for j in range(__lowerCamelCase )]
# since string of zero length match pattern of zero length
lowercase__ : Dict = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __lowerCamelCase ):
lowercase__ : Optional[int] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __lowerCamelCase ):
lowercase__ : List[Any] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __lowerCamelCase ):
for j in range(1 , __lowerCamelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
lowercase__ : int = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
lowercase__ : int = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
lowercase__ : Dict = dp[i - 1][j]
else:
lowercase__ : Union[str, Any] = 0
else:
lowercase__ : List[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
lowerCAmelCase_ = 'aab'
lowerCAmelCase_ = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
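
    # A short worked trace of the DP recurrence above for "aab" vs "c*a*b":
    #   dp[0][0] = 1   -- empty string vs empty pattern
    #   dp[0][2] = 1   -- "" matches "c*"   ('*' erases the preceding 'c')
    #   dp[0][4] = 1   -- "" matches "c*a*"
    #   dp[2][4] = 1   -- "aa" matches "c*a*" ('*' absorbs both 'a's)
    #   dp[3][5] = 1   -- "aab" matches "c*a*b"
    # A few extra sanity checks in the same spirit:
    assert match_pattern("aab", "c*a*b") is True
    assert match_pattern("aaa", "aa") is False
    assert match_pattern("ab", ".*") is True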
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
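
# Usage sketch for the lazy-import structure above (illustrative, assuming the package
# is installed as `transformers`): `import transformers` stays cheap because each
# attribute access below only materializes the submodule that defines it.
#
#   from transformers import RobertaConfig   # loads configuration_roberta only
#   from transformers import RobertaModel    # additionally loads modeling_roberta (requires torch)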
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Any = ReformerTokenizer
lowerCAmelCase : Dict = ReformerTokenizerFast
lowerCAmelCase : Tuple = True
lowerCAmelCase : Tuple = False
lowerCAmelCase : Dict = True
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
super().setUp()
lowercase__ : List[Any] = ReformerTokenizer(_snake_case ,keep_accents=_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
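
# A minimal usage sketch for the checkpoint exercised by the slow tests above; the
# checkpoint name comes from this file, everything else is illustrative. Not called.
def _reformer_tokenizer_sketch():
    tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tokenizer.encode("Hello World!")
    return tokenizer.decode(ids)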
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default="NER" ,metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase : bool = field(default=A_ ,metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,)
lowerCAmelCase : int = field(
default=1_2_8 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
lowercase__ : str = import_module('''tasks''' )
try:
lowercase__ : List[str] = getattr(__lowerCamelCase , model_args.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowercase__ : Union[str, Any] = token_classification_task.get_labels(data_args.labels )
lowercase__ : Dict[int, str] = dict(enumerate(__lowerCamelCase ) )
lowercase__ : Optional[int] = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid={label: i for i, label in enumerate(__lowerCamelCase )} , cache_dir=model_args.cache_dir , )
lowercase__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowercase__ : str = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__lowerCamelCase , __lowerCamelCase ) -> Tuple[List[int], List[int]]:
lowercase__ : Tuple = np.argmax(__lowerCamelCase , axis=2 )
lowercase__ , lowercase__ : Tuple = preds.shape
lowercase__ : List[str] = [[] for _ in range(__lowerCamelCase )]
lowercase__ : Tuple = [[] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__lowerCamelCase ) -> Dict:
lowercase__ , lowercase__ : List[Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__lowerCamelCase , __lowerCamelCase ),
"precision": precision_score(__lowerCamelCase , __lowerCamelCase ),
"recall": recall_score(__lowerCamelCase , __lowerCamelCase ),
"f1": fa_score(__lowerCamelCase , __lowerCamelCase ),
}
# Data collator
    lowercase__ : Tuple = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
lowercase__ : str = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , compute_metrics=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ : int = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ : Optional[int] = trainer.evaluate()
lowercase__ : Union[str, Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__lowerCamelCase )
# Predict
if training_args.do_predict:
lowercase__ : Optional[int] = TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = trainer.predict(__lowerCamelCase )
lowercase__ , lowercase__ : Tuple = align_predictions(__lowerCamelCase , __lowerCamelCase )
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return results
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
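# The align_predictions helper above maps argmax logits back to label strings while
# skipping positions flagged with the CrossEntropyLoss ignore index. A minimal,
# self-contained sketch of the same idea (the names and the -100 default are
# illustrative assumptions, not the script's own definitions):
import numpy as np

def align_predictions_sketch(predictions, label_ids, label_map, ignore_index=-100):
    # predictions: (batch, seq_len, num_labels) logits; label_ids: (batch, seq_len) ints
    preds = np.argmax(predictions, axis=2)
    batch_size, seq_len = preds.shape
    preds_list = [[] for _ in range(batch_size)]
    out_label_list = [[] for _ in range(batch_size)]
    for i in range(batch_size):
        for j in range(seq_len):
            if label_ids[i, j] != ignore_index:  # skip padding / special tokens
                out_label_list[i].append(label_map[label_ids[i, j]])
                preds_list[i].append(label_map[preds[i, j]])
    return preds_list, out_label_list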
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = "distilbert"
lowerCAmelCase : List[str] = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self : Tuple ,_snake_case : Optional[int]=30_522 ,_snake_case : List[Any]=512 ,_snake_case : List[str]=False ,_snake_case : Optional[Any]=6 ,_snake_case : str=12 ,_snake_case : int=768 ,_snake_case : Optional[Any]=4 * 768 ,_snake_case : Tuple=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : str="gelu" ,_snake_case : List[Any]=0.02 ,_snake_case : Tuple=0.1 ,_snake_case : Tuple=0.2 ,_snake_case : Optional[int]=0 ,**_snake_case : Optional[Any] ,) -> Tuple:
"""simple docstring"""
lowercase__ : Optional[Any] = vocab_size
lowercase__ : Any = max_position_embeddings
lowercase__ : List[Any] = sinusoidal_pos_embds
lowercase__ : List[Any] = n_layers
lowercase__ : Optional[int] = n_heads
lowercase__ : Tuple = dim
lowercase__ : List[Any] = hidden_dim
lowercase__ : str = dropout
lowercase__ : Any = attention_dropout
lowercase__ : Tuple = activation
lowercase__ : List[str] = initializer_range
lowercase__ : Tuple = qa_dropout
lowercase__ : Tuple = seq_classif_dropout
super().__init__(**_snake_case ,pad_token_id=_snake_case )
class __A ( A_ ):
'''simple docstring'''
@property
def UpperCAmelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase__ : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
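# The attribute_map above lets the generic config names resolve to DistilBERT's own
# field names. A quick illustrative check of that aliasing (assuming the released
# transformers DistilBertConfig, which defines the same map):
from transformers import DistilBertConfig

config = DistilBertConfig()
assert config.hidden_size == config.dim              # "hidden_size" -> "dim"
assert config.num_attention_heads == config.n_heads  # "num_attention_heads" -> "n_heads"
assert config.num_hidden_layers == config.n_layers   # "num_hidden_layers" -> "n_layers"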
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[int]:
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
        lowercase__ : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : Dict = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
# For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
lowercase__ : Any = 2
# Initialize accelerator
lowercase__ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : List[Any] = config['''lr''']
lowercase__ : Union[str, Any] = int(config['''num_epochs'''] )
lowercase__ : List[str] = int(config['''seed'''] )
lowercase__ : Any = int(config['''batch_size'''] )
lowercase__ : int = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
    # We can now define an inner training loop function. It takes the batch size as its only parameter
    # and builds the dataloaders inside it.
    # It also gets our decorator.
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : str = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : Optional[int] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[str] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : int = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Dict = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Any = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : Union[str, Any] = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
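# The decorator used above is the point of this example: when the wrapped function
# raises an out-of-memory error, it is retried with the batch size halved. A
# stripped-down sketch of the pattern (the body is a placeholder, not the real loop):
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def training_sketch(batch_size):
    # Build dataloaders, model and optimizer for `batch_size` here. If an OOM error
    # escapes this function, the decorator calls it again with batch_size // 2.
    print(f"attempting training with batch_size={batch_size}")

training_sketch()  # invoked with no arguments; the decorator injects batch_size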
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __A :
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : Dict ,_snake_case : Union[str, Any]=3 ,_snake_case : List[Any]=7 ,_snake_case : Dict=True ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=True ,_snake_case : Any=99 ,_snake_case : Any=32 ,_snake_case : str=5 ,_snake_case : Union[str, Any]=4 ,_snake_case : List[str]=37 ,_snake_case : Optional[Any]="gelu" ,_snake_case : Optional[Any]=0.1 ,_snake_case : List[str]=0.1 ,_snake_case : Dict=512 ,_snake_case : List[str]=16 ,_snake_case : List[Any]=2 ,_snake_case : Dict=0.02 ,_snake_case : Any=3 ,_snake_case : int=4 ,_snake_case : Any=None ,) -> Tuple:
"""simple docstring"""
lowercase__ : str = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Any = seq_length
lowercase__ : int = is_training
lowercase__ : List[Any] = use_input_mask
lowercase__ : Union[str, Any] = use_token_type_ids
lowercase__ : List[Any] = use_labels
lowercase__ : List[str] = vocab_size
lowercase__ : str = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[int] = intermediate_size
lowercase__ : Any = hidden_act
lowercase__ : Optional[int] = hidden_dropout_prob
lowercase__ : int = attention_probs_dropout_prob
lowercase__ : Any = max_position_embeddings
lowercase__ : str = type_vocab_size
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Optional[int] = num_choices
lowercase__ : Union[str, Any] = scope
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase__ : Dict = None
if self.use_input_mask:
lowercase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Any = None
lowercase__ : int = None
lowercase__ : Dict = None
lowercase__ : Optional[int] = None
if self.use_labels:
lowercase__ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase__ : int = ids_tensor([self.batch_size] ,self.num_choices )
lowercase__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,pad_token_id=1 ,new_decoder_architecture=_snake_case ,)
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : str ,_snake_case : Tuple ,_snake_case : str ,_snake_case : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any] ,_snake_case : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : Tuple = FalconModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Any = model(_snake_case ,attention_mask=_snake_case )
lowercase__ : Optional[Any] = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Optional[int] ,_snake_case : int ,_snake_case : Optional[int] ,_snake_case : Any ,_snake_case : Optional[int] ,_snake_case : str ,_snake_case : str ,_snake_case : int ,_snake_case : Dict ,_snake_case : Tuple ,) -> Dict:
"""simple docstring"""
lowercase__ : Any = True
lowercase__ : int = FalconModel(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[int] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,)
lowercase__ : List[str] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,)
lowercase__ : Union[str, Any] = model(_snake_case ,attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : int ,_snake_case : Any ,_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : List[str] ,_snake_case : str ,_snake_case : Union[str, Any] ,_snake_case : Tuple ,_snake_case : Union[str, Any] ,_snake_case : str ,) -> List[Any]:
"""simple docstring"""
lowercase__ : Dict = FalconForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Dict ,_snake_case : str ,_snake_case : List[str] ,_snake_case : List[Any] ,_snake_case : Optional[int] ,_snake_case : str ,_snake_case : Union[str, Any] ,_snake_case : int ,_snake_case : Any ,_snake_case : Optional[int] ,) -> int:
"""simple docstring"""
lowercase__ : Tuple = True
lowercase__ : Any = True
lowercase__ : Dict = FalconForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowercase__ : Union[str, Any] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,use_cache=_snake_case ,)
lowercase__ : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowercase__ : str = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
lowercase__ : Any = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase__ : Union[str, Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
lowercase__ : Optional[int] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0]
lowercase__ : Union[str, Any] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,past_key_values=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0]
# select random slice
lowercase__ : List[Any] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-3 ) )
def UpperCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : int = config_and_inputs
lowercase__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[str] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase : List[str] = (FalconForCausalLM,) if is_torch_available() else ()
lowerCAmelCase : Tuple = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[Any] = False
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = FalconModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 )
def UpperCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , *lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowercase__ : Optional[int] = alibi
self.model_tester.create_and_check_model(_snake_case ,*_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = 3
lowercase__ : Optional[Any] = input_dict['''input_ids''']
lowercase__ : Optional[int] = input_ids.ne(1 ).to(_snake_case )
lowercase__ : Any = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
lowercase__ : Optional[int] = FalconForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = 3
lowercase__ : Any = '''single_label_classification'''
lowercase__ : Any = input_dict['''input_ids''']
lowercase__ : str = input_ids.ne(1 ).to(_snake_case )
lowercase__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
lowercase__ : Any = FalconForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[str] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = input_dict['''input_ids''']
lowercase__ : List[Any] = FalconForCausalLM(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Dict = model(_snake_case ,use_cache=_snake_case )
lowercase__ : List[str] = input_ids.shape[0]
lowercase__ : Optional[Any] = model._convert_to_rw_cache(result.past_key_values )
lowercase__ : Dict = model._convert_cache_to_standard_format(_snake_case ,_snake_case )
for layer in range(len(_snake_case ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = 3
lowercase__ : Tuple = '''multi_label_classification'''
lowercase__ : Optional[Any] = input_dict['''input_ids''']
lowercase__ : Union[str, Any] = input_ids.ne(1 ).to(_snake_case )
lowercase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase__ : Optional[Any] = FalconForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Any = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for model_class in self.all_generative_model_classes:
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_snake_case ,'''use_cache''' ):
return
lowercase__ : str = model_class(_snake_case ).to(_snake_case )
if "use_cache" not in inputs:
lowercase__ : int = True
lowercase__ : Optional[int] = model(**_snake_case )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowercase__ : Optional[int] = (
getattr(_snake_case ,'''decoder_layers''' ,_snake_case )
or getattr(_snake_case ,'''num_decoder_layers''' ,_snake_case )
or config.num_hidden_layers
)
lowercase__ : List[str] = getattr(_snake_case ,'''num_kv_heads''' ,config.num_attention_heads )
lowercase__ : List[Any] = getattr(_snake_case ,'''d_model''' ,config.hidden_size )
lowercase__ : Dict = embed_dim // num_attention_heads
lowercase__ : Tuple = outputs['''past_key_values''']
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ : Dict = inputs['''input_ids'''].shape
for i in range(_snake_case ):
if config.new_decoder_architecture:
lowercase__ : int = config.num_attention_heads
elif config.multi_query:
lowercase__ : int = 1
self.assertEqual(len(past_kv[0] ) ,2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
lowercase__ : List[Any] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(_snake_case )
lowercase__ : List[str] = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_snake_case )
lowercase__ : Any = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
lowercase__ : List[Any] = model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=19 )
lowercase__ : Union[str, Any] = tokenizer.batch_decode(_snake_case )[0]
self.assertEqual(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ : Union[str, Any] = FalconForCausalLM.from_pretrained(_snake_case )
model.eval()
model.to(_snake_case )
lowercase__ : Optional[Any] = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_snake_case )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=4 )
model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=4 )
model.generate(**_snake_case ,num_beams=2 ,max_new_tokens=4 )
@slow
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ : List[Any] = FalconForCausalLM.from_pretrained(_snake_case )
model.eval()
model.to(device=_snake_case )
lowercase__ : List[Any] = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_snake_case )
# Test results are the same with and without cache
lowercase__ : Optional[Any] = model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=20 ,use_cache=_snake_case )
lowercase__ : str = model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=20 ,use_cache=_snake_case )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
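# The cache tests above rely on past_key_values being one (key, value) pair per
# decoder layer, each shaped (batch, num_heads, seq_len, head_dim) in the standard
# format. A minimal version of that check against a tiny public checkpoint
# (sshleifer/tiny-gpt2 is chosen purely for illustration):
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
inputs = tokenizer("My favorite food is", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, use_cache=True)
assert len(outputs.past_key_values) == model.config.num_hidden_layers
key, value = outputs.past_key_values[0]
assert key.ndim == value.ndim == 4  # (batch, num_heads, seq_len, head_dim)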
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowercase__ : List[Any] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_snake_case )
        lowercase__ : int = AutoModelForSeq2SeqLM.from_pretrained(_snake_case )
lowercase__ : str = tokenizer('''This is me''' ,return_tensors='''pt''' )
lowercase__ : Tuple = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase__ : Optional[int] = model.generate(**_snake_case )
lowercase__ : List[Any] = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
            lowercase__ : Tuple = AutoModelForSeq2SeqLM.from_pretrained(_snake_case )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase__ : int = model_reloaded.generate(**_snake_case )
self.assertTrue(torch.allclose(_snake_case ,_snake_case ) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[str] = '''hf-internal-testing/tiny-random-t5'''
        lowercase__ : Optional[int] = AutoModelForSeq2SeqLM.from_pretrained(_snake_case )
lowercase__ : Union[str, Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_snake_case ):
model.save_pretrained(_snake_case )
lowercase__ : int = model.reverse_bettertransformer()
model.save_pretrained(_snake_case )
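# The two tests above pin down the BetterTransformer contract: convert for fused
# attention kernels, and reverse before saving. A condensed usage sketch (assumes
# both transformers and optimum are installed; the output path is illustrative):
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()        # swap in the fast attention modules
# ... run inference here ...
model = model.reverse_bettertransformer()   # restore the canonical modules
model.save_pretrained("tiny-t5-checkpoint") # saving only works after reversing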
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
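# Every scheduler re-exported above implements SchedulerMixin, so pipelines can
# swap one scheduler for another via from_config. An illustrative swap (the model
# id is just an example of a public pipeline):
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Rebuild a different scheduler from the current scheduler's config and swap it in.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)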
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> Any:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
lowercase__ : List[str] = os.path.abspath(__lowerCamelCase )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
lowercase__ : List[Any] = torch.load(__lowerCamelCase , map_location='''cpu''' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
lowercase__ : int = convert_pytorch_state_dict_to_flax(__lowerCamelCase , __lowerCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowercase__ : Dict = convert_pytorch_sharded_state_dict_to_flax(__lowerCamelCase , __lowerCamelCase )
return flax_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(__lowerCamelCase ) -> bool:
return len(set(__lowerCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowercase__ : int = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowercase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowercase__ : Any = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowercase__ : Tuple = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase__ : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
lowercase__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
lowercase__ : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase__ : Optional[int] = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowercase__ : List[str] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowercase__ : List[str] = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowercase__ : List[str] = pt_tuple_key[-2] + '''_v'''
if name is not None:
lowercase__ : Optional[Any] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
# convert pytorch tensor to numpy
lowercase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase__ : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowercase__ : str = flax_model.params['''params''']
else:
lowercase__ : Optional[int] = flax_model.params
lowercase__ : Optional[Any] = flatten_dict(__lowerCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowercase__ : Tuple = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__lowerCamelCase )
lowercase__ : int = {}
lowercase__ : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowercase__ : Union[str, Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ : Optional[Any] = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowercase__ : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase__ , lowercase__ : List[str] = rename_key_and_reshape_tensor(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# add model prefix if necessary
lowercase__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowercase__ : int = jnp.asarray(__lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCamelCase , __lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowercase__ : Tuple = jnp.asarray(__lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowercase__ : Any = jnp.asarray(__lowerCamelCase )
return unflatten_dict(__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
import torch
# Load the index
lowercase__ : Dict = {}
for shard_file in shard_filenames:
# load using msgpack utils
lowercase__ : Optional[int] = torch.load(__lowerCamelCase )
lowercase__ : str = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase__ : Dict = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowercase__ : Optional[Any] = flax_model.params['''params''']
lowercase__ : List[Any] = flatten_dict(__lowerCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
lowercase__ : Union[str, Any] = flax_model.params
lowercase__ : Tuple = flatten_dict(__lowerCamelCase )
lowercase__ : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowercase__ : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ : List[str] = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowercase__ : Tuple = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase__ , lowercase__ : str = rename_key_and_reshape_tensor(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# add model prefix if necessary
lowercase__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowercase__ : Union[str, Any] = jnp.asarray(__lowerCamelCase )
continue
if "var" in flax_key[-1]:
lowercase__ : str = jnp.asarray(__lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCamelCase , __lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowercase__ : List[str] = jnp.asarray(__lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowercase__ : Union[str, Any] = jnp.asarray(__lowerCamelCase )
return unflatten_dict(__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : List[str] = os.path.abspath(__lowerCamelCase )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
lowercase__ : Optional[int] = getattr(__lowerCamelCase , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__lowerCamelCase , '''rb''' ) as state_f:
try:
lowercase__ : str = from_bytes(__lowerCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    lowercase__ : Any = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , __lowerCamelCase ) ).values()
    if any(__lowerCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        lowercase__ : Union[str, Any] = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , __lowerCamelCase )
lowercase__ : Tuple = flatten_dict(__lowerCamelCase )
lowercase__ : List[str] = pt_model.state_dict()
lowercase__ : int = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
lowercase__ : int = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowercase__ : List[str] = []
lowercase__ : Tuple = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowercase__ : List[Any] = flax_key_tuple[0] == pt_model.base_model_prefix
lowercase__ : Optional[int] = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : Tuple = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCamelCase ) not in pt_model_dict:
# conv layer
lowercase__ : Dict = flax_key_tuple[:-1] + ('''weight''',)
lowercase__ : List[str] = jnp.transpose(__lowerCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ) not in pt_model_dict:
# linear layer
lowercase__ : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
lowercase__ : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowercase__ : Dict = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowercase__ : Any = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
lowercase__ : Dict = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
lowercase__ : Union[str, Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowercase__ : Dict = '''.'''.join(__lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowercase__ : Optional[int] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowercase__ : str = key.split('''.''' )
lowercase__ : Optional[Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowercase__ : List[str] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowercase__ : str = key_components[-2] + '''_v'''
if name is not None:
lowercase__ : Optional[int] = key_components[:-3] + [name]
lowercase__ : List[str] = '''.'''.join(__lowerCamelCase )
lowercase__ : List[Any] = key
if flax_key in special_pt_names:
lowercase__ : Any = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
lowercase__ : List[str] = np.asarray(__lowerCamelCase ) if not isinstance(__lowerCamelCase , np.ndarray ) else flax_tensor
lowercase__ : List[str] = torch.from_numpy(__lowerCamelCase )
# remove from missing keys
missing_keys.remove(__lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowerCamelCase )
pt_model.load_state_dict(__lowerCamelCase )
# re-transform missing_keys to list
lowercase__ : Optional[Any] = list(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(__lowerCamelCase ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
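# The rename helpers above also encode the weight-layout differences between the
# two frameworks: PyTorch linear weights are (out_features, in_features) while Flax
# kernels are (in_features, out_features), and PyTorch conv weights (out, in, kh, kw)
# become Flax kernels (kh, kw, in, out). A tiny numpy illustration of the two
# transposes used in this module:
import numpy as np

pt_linear = np.zeros((8, 4))                      # (out_features, in_features)
flax_kernel = pt_linear.T                         # -> (in_features, out_features)
assert flax_kernel.shape == (4, 8)

pt_conv = np.zeros((16, 3, 5, 5))                 # (out_ch, in_ch, kh, kw)
flax_conv_kernel = pt_conv.transpose(2, 3, 1, 0)  # -> (kh, kw, in_ch, out_ch)
assert flax_conv_kernel.shape == (5, 5, 3, 16)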
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : str = ["keras_nlp"]
def __init__( self : int ,*_snake_case : str ,**_snake_case : Any ) -> int:
"""simple docstring"""
requires_backends(self ,['''keras_nlp'''] )
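# Dummy classes like the one above keep `import transformers` working when
# keras_nlp is missing, while instantiating the class fails loudly. A simplified
# sketch of the pattern (not the library's exact implementation):
class RequiresKerasNLP:
    def __init__(self, *args, **kwargs):
        raise ImportError(
            "This class requires the keras_nlp backend. "
            "Install it with `pip install keras-nlp`."
        )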
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class __A ( A_ ):
'''simple docstring'''
    def __init__( self : Any ,_snake_case : UNet1DModel ,_snake_case : UNet1DModel ,_snake_case : DDPMScheduler ,_snake_case : Any ,) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[int] = value_function
lowercase__ : Optional[int] = unet
lowercase__ : Tuple = scheduler
lowercase__ : Dict = env
lowercase__ : int = env.get_dataset()
lowercase__ : Dict = {}
for key in self.data.keys():
try:
lowercase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ : List[Any] = {}
for key in self.data.keys():
try:
lowercase__ : str = self.data[key].std()
except: # noqa: E722
pass
lowercase__ : Tuple = env.observation_space.shape[0]
lowercase__ : Optional[int] = env.action_space.shape[0]
def UpperCAmelCase ( self : str ,_snake_case : Any ,_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def UpperCAmelCase ( self : Dict ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
if type(_snake_case ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
elif torch.is_tensor(_snake_case ):
return x_in.to(self.unet.device )
return torch.tensor(_snake_case ,device=self.unet.device )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Any ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
for key, val in cond.items():
lowercase__ : List[Any] = val.clone()
return x_in
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ,_snake_case : List[Any] ,_snake_case : int ,_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Any = x.shape[0]
lowercase__ : Dict = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ : Dict = torch.full((batch_size,) ,_snake_case ,device=self.unet.device ,dtype=torch.long )
for _ in range(_snake_case ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ : int = self.value_function(x.permute(0 ,2 ,1 ) ,_snake_case ).sample
lowercase__ : Optional[Any] = torch.autograd.grad([y.sum()] ,[x] )[0]
lowercase__ : List[str] = self.scheduler._get_variance(_snake_case )
lowercase__ : Union[str, Any] = torch.exp(0.5 * posterior_variance )
lowercase__ : Optional[int] = model_std * grad
lowercase__ : Optional[Any] = 0
lowercase__ : str = x.detach()
lowercase__ : Dict = x + scale * grad
lowercase__ : str = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : Union[str, Any] = self.unet(x.permute(0 ,2 ,1 ) ,_snake_case ).sample.permute(0 ,2 ,1 )
# TODO: verify deprecation of this kwarg
lowercase__ : Dict = self.scheduler.step(_snake_case ,_snake_case ,_snake_case ,predict_epsilon=_snake_case )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
lowercase__ : Dict = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : Union[str, Any] = self.to_torch(_snake_case )
return x, y
def __call__( self : Union[str, Any] ,_snake_case : Any ,_snake_case : Tuple=64 ,_snake_case : Any=32 ,_snake_case : Optional[Any]=2 ,_snake_case : str=0.1 ) -> List[Any]:
"""simple docstring"""
lowercase__ : Any = self.normalize(_snake_case ,'''observations''' )
lowercase__ : Tuple = obs[None].repeat(_snake_case ,axis=0 )
lowercase__ : Dict = {0: self.to_torch(_snake_case )}
lowercase__ : int = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ : Optional[int] = randn_tensor(_snake_case ,device=self.unet.device )
lowercase__ : Tuple = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : str = self.to_torch(_snake_case )
# run the diffusion process
lowercase__ , lowercase__ : int = self.run_diffusion(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# sort output trajectories by value
lowercase__ : Optional[Any] = y.argsort(0 ,descending=_snake_case ).squeeze()
lowercase__ : str = x[sorted_idx]
lowercase__ : str = sorted_values[:, :, : self.action_dim]
lowercase__ : Optional[int] = actions.detach().cpu().numpy()
lowercase__ : List[str] = self.de_normalize(_snake_case ,key='''actions''' )
# select the action with the highest value
if y is not None:
lowercase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ : str = np.random.randint(0 ,_snake_case )
lowercase__ : int = denorm_actions[selected_index, 0]
return denorm_actions
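# Hedged usage sketch for the value-guided planning pipeline above. The class is
# mangled to `__A` here but mirrors diffusers' ValueGuidedRLPipeline; the gym env
# and checkpoint name below are illustrative assumptions, not taken from this file.
#
#   import gym
#   env = gym.make("hopper-medium-v2")  # must expose get_dataset(), d4rl-style
#   pipeline = ValueGuidedRLPipeline.from_pretrained(
#       "bglick13/hopper-medium-v2-value-function-hor32", env=env
#   )
#   obs = env.reset()
#   # samples trajectories, value-guides them, and returns the best first action
#   action = pipeline(obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1)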
"""simple docstring"""
from math import factorial, pi
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 30 ) -> float:
if not isinstance(__lowerCamelCase , (int, float) ):
raise ValueError('''maclaurin_sin() requires either an int or float for theta''' )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or accuracy <= 0:
raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' )
lowercase__ : Union[str, Any] = float(__lowerCamelCase )
lowercase__ : Any = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__lowerCamelCase ) )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 30 ) -> float:
if not isinstance(__lowerCamelCase , (int, float) ):
raise ValueError('''maclaurin_cos() requires either an int or float for theta''' )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or accuracy <= 0:
raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' )
lowercase__ : int = float(__lowerCamelCase )
lowercase__ : Tuple = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
lowerCAmelCase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
lowerCAmelCase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of references, one for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/krishnap25/mauve''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/krishnap25/mauve'''] ,reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[Any] ,_snake_case : Any ,_snake_case : List[str]=None ,_snake_case : Tuple=None ,_snake_case : List[Any]=None ,_snake_case : Any=None ,_snake_case : Optional[int]="auto" ,_snake_case : Optional[int]=-1 ,_snake_case : Optional[int]=0.9 ,_snake_case : Any=5 ,_snake_case : Dict=500 ,_snake_case : Optional[int]="gpt2-large" ,_snake_case : Optional[Any]=-1 ,_snake_case : Tuple=1_024 ,_snake_case : Optional[int]=25 ,_snake_case : Dict=5 ,_snake_case : int=True ,_snake_case : Union[str, Any]=25 ,) -> Any:
"""simple docstring"""
lowercase__ : Any = compute_mauve(
p_text=_snake_case ,q_text=_snake_case ,p_features=_snake_case ,q_features=_snake_case ,p_tokens=_snake_case ,q_tokens=_snake_case ,num_buckets=_snake_case ,pca_max_data=_snake_case ,kmeans_explained_var=_snake_case ,kmeans_num_redo=_snake_case ,kmeans_max_iter=_snake_case ,featurize_model_name=_snake_case ,device_id=_snake_case ,max_text_length=_snake_case ,divergence_curve_discretization_size=_snake_case ,mauve_scaling_factor=_snake_case ,verbose=_snake_case ,seed=_snake_case ,)
return out
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
lowercase__ : Any = multiprocessing.Manager()
lowercase__ : Dict = manager.list()
lowercase__ : Union[str, Any] = multiprocessing.Process(target=__lowerCamelCase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
lowercase__ : List[str] = shutil.rmtree
lowercase__ : Optional[Any] = os.rmdir
lowercase__ : Union[str, Any] = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
lowercase__ : int = {}
with swallow_io():
with time_limit(__lowerCamelCase ):
exec(__lowerCamelCase , __lowerCamelCase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(f"""failed: {e}""" )
# Needed for cleaning up.
lowercase__ : Optional[Any] = rmtree
lowercase__ : str = rmdir
lowercase__ : str = chdir
@contextlib.contextmanager
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
def signal_handler(__lowerCamelCase , __lowerCamelCase ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , __lowerCamelCase )
signal.signal(signal.SIGALRM , __lowerCamelCase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(__lowerCamelCase ):
with contextlib.redirect_stderr(__lowerCamelCase ):
with redirect_stdin(__lowerCamelCase ):
yield
@contextlib.contextmanager
def __UpperCAmelCase ( ) -> List[Any]:
with tempfile.TemporaryDirectory() as dirname:
with chdir(__lowerCamelCase ):
yield dirname
class __A ( A_ ):
'''simple docstring'''
pass
class __A ( io.StringIO ):
'''simple docstring'''
def UpperCAmelCase ( self : Dict ,*_snake_case : int ,**_snake_case : List[Any] ) -> str:
"""simple docstring"""
raise OSError
def UpperCAmelCase ( self : Any ,*_snake_case : Tuple ,**_snake_case : Dict ) -> Any:
"""simple docstring"""
raise OSError
def UpperCAmelCase ( self : Dict ,*_snake_case : Dict ,**_snake_case : str ) -> List[str]:
"""simple docstring"""
raise OSError
def UpperCAmelCase ( self : int ,*_snake_case : str ,**_snake_case : str ) -> int:
"""simple docstring"""
return False
class __A ( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
lowerCAmelCase : List[Any] = "stdin"
@contextlib.contextmanager
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
if root == ".":
yield
return
lowercase__ : List[Any] = os.getcwd()
os.chdir(__lowerCamelCase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase=None ) -> Optional[int]:
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
lowercase__ : List[str] = None
lowercase__ : Tuple = None
import os
lowercase__ : List[str] = '''1'''
lowercase__ : Optional[int] = None
lowercase__ : List[str] = None
lowercase__ : Optional[Any] = None
lowercase__ : List[str] = None
lowercase__ : str = None
lowercase__ : str = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[Any] = None
lowercase__ : Tuple = None
lowercase__ : Tuple = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[int] = None
lowercase__ : Tuple = None
lowercase__ : Any = None
lowercase__ : Optional[int] = None
lowercase__ : Tuple = None
lowercase__ : str = None
lowercase__ : List[Any] = None
lowercase__ : Optional[Any] = None
lowercase__ : Any = None
lowercase__ : Tuple = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[int] = None
lowercase__ : List[str] = None
lowercase__ : Union[str, Any] = None
lowercase__ : Tuple = None
lowercase__ : List[str] = None
import shutil
lowercase__ : List[Any] = None
lowercase__ : List[Any] = None
lowercase__ : Tuple = None
import subprocess
lowercase__ : Any = None # type: ignore
lowercase__ : int = None
import sys
lowercase__ : str = None
lowercase__ : Tuple = None
lowercase__ : int = None
lowercase__ : Optional[Any] = None
lowercase__ : Optional[Any] = None
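# Hedged sketch of how this sandbox is driven in OpenAI's human-eval, where the two
# top-level functions above (both mangled to `__UpperCAmelCase`) are named
# `check_correctness` and `unsafe_execute`; those names and the call shape below are
# assumptions taken from that repository, not from this file.
#
#   program = completion + "\n" + test_code  # untrusted code string to validate
#   out = check_correctness(program, timeout=3.0, task_id="HumanEval/0", completion_id=0)
#   # -> {"task_id": "HumanEval/0", "passed": bool, "result": "passed" / "timed out" / "failed: ...", ...}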
"""simple docstring"""
import math
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
lowercase__ : Tuple = 0
lowercase__ : Tuple = 0
while num > 0:
lowercase__ : int = num % 8
        lowercase__ : Tuple = octal + (remainder * math.floor(math.pow(10 , counter ) ))
counter += 1
lowercase__ : Optional[Any] = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f"""0o{int(__lowerCamelCase )}"""
def __UpperCAmelCase ( ) -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = "openai/whisper-base"
lowerCAmelCase : str = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
lowerCAmelCase : List[Any] = "transcriber"
lowerCAmelCase : Union[str, Any] = WhisperProcessor
lowerCAmelCase : List[Any] = WhisperForConditionalGeneration
lowerCAmelCase : Optional[int] = ["audio"]
lowerCAmelCase : int = ["text"]
def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> List[str]:
"""simple docstring"""
return self.pre_processor(_snake_case ,return_tensors='''pt''' ).input_features
def UpperCAmelCase ( self : Any ,_snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return self.model.generate(inputs=_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[Any] ) -> str:
"""simple docstring"""
return self.pre_processor.batch_decode(_snake_case ,skip_special_tokens=_snake_case )[0]
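# Hedged usage sketch for the transcription tool above (a PipelineTool around
# openai/whisper-base). `load_tool` is the transformers agents entry point; the
# registry identifier is an assumption based on the `"transcriber"` name field:
#
#   from transformers import load_tool
#   transcriber = load_tool("transcriber")
#   text = transcriber("path/to/audio.flac")  # encode -> generate -> batch_decode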
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowerCAmelCase_ = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowerCAmelCase_ = 'UperNetConfig'
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : int ,_snake_case : int ,_snake_case : Union[int, Tuple[int, int]] ,_snake_case : Union[int, Tuple[int, int], str] = 0 ,_snake_case : bool = False ,_snake_case : Union[int, Tuple[int, int]] = 1 ,) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[int] = nn.Convad(
in_channels=_snake_case ,out_channels=_snake_case ,kernel_size=_snake_case ,padding=_snake_case ,bias=_snake_case ,dilation=_snake_case ,)
lowercase__ : Tuple = nn.BatchNormad(_snake_case )
lowercase__ : List[str] = nn.ReLU()
def UpperCAmelCase ( self : str ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.conv(_snake_case )
lowercase__ : List[str] = self.batch_norm(_snake_case )
lowercase__ : Tuple = self.activation(_snake_case )
return output
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : int ,_snake_case : int ,_snake_case : int ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = [
nn.AdaptiveAvgPoolad(_snake_case ),
UperNetConvModule(_snake_case ,_snake_case ,kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_snake_case ) ,_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Any = input
for layer in self.layers:
lowercase__ : int = layer(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : Tuple[int, ...] ,_snake_case : int ,_snake_case : int ,_snake_case : bool ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : int = pool_scales
lowercase__ : Dict = align_corners
lowercase__ : Optional[Any] = in_channels
lowercase__ : Optional[Any] = channels
lowercase__ : int = []
for i, pool_scale in enumerate(_snake_case ):
lowercase__ : Optional[Any] = UperNetPyramidPoolingBlock(pool_scale=_snake_case ,in_channels=_snake_case ,channels=_snake_case )
self.blocks.append(_snake_case )
self.add_module(str(_snake_case ) ,_snake_case )
def UpperCAmelCase ( self : Any ,_snake_case : torch.Tensor ) -> List[torch.Tensor]:
"""simple docstring"""
lowercase__ : int = []
for ppm in self.blocks:
lowercase__ : Any = ppm(_snake_case )
lowercase__ : int = nn.functional.interpolate(
_snake_case ,size=x.size()[2:] ,mode='''bilinear''' ,align_corners=self.align_corners )
ppm_outs.append(_snake_case )
return ppm_outs
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
super().__init__()
lowercase__ : str = config
lowercase__ : Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
lowercase__ : Optional[Any] = in_channels
lowercase__ : Any = config.hidden_size
lowercase__ : Optional[Any] = False
lowercase__ : Optional[int] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 )
# PSP Module
lowercase__ : Dict = UperNetPyramidPoolingModule(
self.pool_scales ,self.in_channels[-1] ,self.channels ,align_corners=self.align_corners ,)
lowercase__ : str = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
# FPN Module
lowercase__ : Any = nn.ModuleList()
lowercase__ : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowercase__ : List[Any] = UperNetConvModule(_snake_case ,self.channels ,kernel_size=1 )
lowercase__ : Optional[int] = UperNetConvModule(self.channels ,self.channels ,kernel_size=3 ,padding=1 )
self.lateral_convs.append(_snake_case )
self.fpn_convs.append(_snake_case )
lowercase__ : int = UperNetConvModule(
len(self.in_channels ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.apply(self._init_weights )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : Dict = inputs[-1]
lowercase__ : Optional[int] = [x]
psp_outs.extend(self.psp_modules(_snake_case ) )
lowercase__ : Optional[Any] = torch.cat(_snake_case ,dim=1 )
lowercase__ : List[str] = self.bottleneck(_snake_case )
return output
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_snake_case ) )
# build top-down path
lowercase__ : List[Any] = len(_snake_case )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Union[str, Any] = laterals[i - 1].shape[2:]
lowercase__ : int = laterals[i - 1] + nn.functional.interpolate(
laterals[i] ,size=_snake_case ,mode='''bilinear''' ,align_corners=self.align_corners )
# build outputs
lowercase__ : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Any = nn.functional.interpolate(
fpn_outs[i] ,size=fpn_outs[0].shape[2:] ,mode='''bilinear''' ,align_corners=self.align_corners )
lowercase__ : Any = torch.cat(_snake_case ,dim=1 )
lowercase__ : Any = self.fpn_bottleneck(_snake_case )
lowercase__ : str = self.classifier(_snake_case )
return output
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : List[Any] ,_snake_case : int = 2 ,_snake_case : int = 3 ,_snake_case : Union[int, Tuple[int, int]] = 1 ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : int = config
lowercase__ : Dict = config.auxiliary_in_channels
lowercase__ : Optional[int] = config.auxiliary_channels
lowercase__ : List[Any] = config.auxiliary_num_convs
lowercase__ : List[Any] = config.auxiliary_concat_input
lowercase__ : str = in_index
lowercase__ : Any = (kernel_size // 2) * dilation
lowercase__ : Optional[Any] = []
convs.append(
UperNetConvModule(
self.in_channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
if self.num_convs == 0:
lowercase__ : List[str] = nn.Identity()
else:
lowercase__ : Dict = nn.Sequential(*_snake_case )
if self.concat_input:
lowercase__ : int = UperNetConvModule(
self.in_channels + self.channels ,self.channels ,kernel_size=_snake_case ,padding=kernel_size // 2 )
lowercase__ : List[str] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.apply(self._init_weights )
def UpperCAmelCase ( self : List[Any] ,_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : str = encoder_hidden_states[self.in_index]
lowercase__ : List[str] = self.convs(_snake_case )
if self.concat_input:
lowercase__ : Any = self.conv_cat(torch.cat([hidden_states, output] ,dim=1 ) )
lowercase__ : Dict = self.classifier(_snake_case )
return output
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Any = UperNetConfig
lowerCAmelCase : str = "pixel_values"
lowerCAmelCase : Dict = True
def UpperCAmelCase ( self : int ,_snake_case : str ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def UpperCAmelCase ( self : int ,_snake_case : str ,_snake_case : str=False ) -> List[str]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[Any] = value
lowerCAmelCase_ = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." ,A_ ,)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Tuple ) -> int:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : int = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowercase__ : Any = UperNetHead(_snake_case ,in_channels=self.backbone.channels )
lowercase__ : str = UperNetFCNHead(_snake_case ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=_snake_case ,config_class=_CONFIG_FOR_DOC )
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,) -> Union[tuple, SemanticSegmenterOutput]:
"""simple docstring"""
lowercase__ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Any = output_attentions if output_attentions is not None else self.config.output_attentions
lowercase__ : Optional[Any] = self.backbone.forward_with_filtered_kwargs(
_snake_case ,output_hidden_states=_snake_case ,output_attentions=_snake_case )
lowercase__ : Optional[int] = outputs.feature_maps
lowercase__ : Tuple = self.decode_head(_snake_case )
lowercase__ : Optional[int] = nn.functional.interpolate(_snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case )
lowercase__ : List[str] = None
if self.auxiliary_head is not None:
lowercase__ : str = self.auxiliary_head(_snake_case )
lowercase__ : Dict = nn.functional.interpolate(
_snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case )
lowercase__ : Any = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
lowercase__ : Union[str, Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case )
lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowercase__ : Tuple = (logits,) + outputs[1:]
else:
lowercase__ : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
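# Hedged usage sketch for the segmentation model above (the final mangled `__A`
# corresponds to UperNetForSemanticSegmentation). The checkpoint name comes from the
# archive list at the top of this file; the processor choice is an assumption:
#
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # (batch_size, num_labels, height, width)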
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = "perceiver"
def __init__( self : List[Any] ,_snake_case : Optional[int]=256 ,_snake_case : Optional[int]=1_280 ,_snake_case : Optional[Any]=768 ,_snake_case : Dict=1 ,_snake_case : Union[str, Any]=26 ,_snake_case : Any=8 ,_snake_case : Union[str, Any]=8 ,_snake_case : Dict=None ,_snake_case : Union[str, Any]=None ,_snake_case : str="kv" ,_snake_case : Tuple=1 ,_snake_case : Tuple=1 ,_snake_case : Optional[int]="gelu" ,_snake_case : Dict=0.1 ,_snake_case : int=0.02 ,_snake_case : Dict=1e-12 ,_snake_case : List[str]=True ,_snake_case : Optional[int]=262 ,_snake_case : str=2_048 ,_snake_case : int=56 ,_snake_case : Dict=[368, 496] ,_snake_case : str=16 ,_snake_case : int=1_920 ,_snake_case : Dict=16 ,_snake_case : List[str]=[1, 16, 224, 224] ,**_snake_case : Optional[int] ,) -> str:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : int = num_latents
lowercase__ : Any = d_latents
lowercase__ : Dict = d_model
lowercase__ : Any = num_blocks
lowercase__ : Any = num_self_attends_per_block
lowercase__ : Optional[int] = num_self_attention_heads
lowercase__ : Union[str, Any] = num_cross_attention_heads
lowercase__ : Tuple = qk_channels
lowercase__ : Union[str, Any] = v_channels
lowercase__ : Dict = cross_attention_shape_for_attention
lowercase__ : Dict = self_attention_widening_factor
lowercase__ : List[str] = cross_attention_widening_factor
lowercase__ : Any = hidden_act
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Dict = initializer_range
lowercase__ : int = layer_norm_eps
lowercase__ : int = use_query_residual
# masked language modeling attributes
lowercase__ : Dict = vocab_size
lowercase__ : List[Any] = max_position_embeddings
# image classification attributes
lowercase__ : Any = image_size
# flow attributes
lowercase__ : Optional[Any] = train_size
# multimodal autoencoding attributes
lowercase__ : int = num_frames
lowercase__ : Dict = audio_samples_per_frame
lowercase__ : Union[str, Any] = samples_per_patch
lowercase__ : Optional[int] = output_shape
class __A ( A_ ):
'''simple docstring'''
@property
def UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase__ : int = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def UpperCAmelCase ( self : Dict ) -> float:
"""simple docstring"""
return 1e-4
def UpperCAmelCase ( self : Tuple ,_snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_snake_case : int = -1 ,_snake_case : int = -1 ,_snake_case : int = -1 ,_snake_case : bool = False ,_snake_case : Optional[TensorType] = None ,_snake_case : int = 3 ,_snake_case : int = 40 ,_snake_case : int = 40 ,) -> Mapping[str, Any]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase__ : Union[str, Any] = compute_effective_axis_dimension(
_snake_case ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase__ : Optional[Any] = preprocessor.num_special_tokens_to_add(_snake_case )
lowercase__ : Optional[Any] = compute_effective_axis_dimension(
_snake_case ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_snake_case )
# Generate dummy inputs according to compute batch and sequence
lowercase__ : int = [''' '''.join(['''a'''] ) * seq_length] * batch_size
lowercase__ : Any = dict(preprocessor(_snake_case ,return_tensors=_snake_case ) )
lowercase__ : Tuple = inputs.pop('''input_ids''' )
return inputs
elif isinstance(_snake_case ,_snake_case ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase__ : int = compute_effective_axis_dimension(_snake_case ,fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase__ : Optional[Any] = self._generate_dummy_images(_snake_case ,_snake_case ,_snake_case ,_snake_case )
lowercase__ : str = dict(preprocessor(images=_snake_case ,return_tensors=_snake_case ) )
lowercase__ : List[Any] = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
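# Hedged usage sketch for ONNX dummy-input generation with the config above (the
# second mangled `__A` extends OnnxConfig). The tokenizer checkpoint comes from the
# config map above; `PerceiverOnnxConfig` as a class name is an assumption:
#
#   from transformers import PerceiverConfig, PerceiverTokenizer
#   tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#   onnx_config = PerceiverOnnxConfig(PerceiverConfig())
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=-1, seq_length=-1)
#   # -> {"inputs": ..., "attention_mask": ...}; "input_ids" is renamed to "inputs"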
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCAmelCase_ = _symbol_database.Default()
lowerCAmelCase_ = _descriptor_pool.Default().AddSerializedFile(
    B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
lowerCAmelCase_ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowerCAmelCase_ = None
lowerCAmelCase_ = B'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowerCAmelCase_ = 45
lowerCAmelCase_ = 1_581
lowerCAmelCase_ = 1_517
lowerCAmelCase_ = 1_570
lowerCAmelCase_ = 1_584
lowerCAmelCase_ = 1_793
lowerCAmelCase_ = 1_795
lowerCAmelCase_ = 1_916
lowerCAmelCase_ = 1_864
lowerCAmelCase_ = 1_905
lowerCAmelCase_ = 1_919
lowerCAmelCase_ = 2_429
lowerCAmelCase_ = 2_208
lowerCAmelCase_ = 2_418
lowerCAmelCase_ = 2_323
lowerCAmelCase_ = 2_407
# @@protoc_insertion_point(module_scope)
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=A_ )
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
lowerCAmelCase : ClassVar[Features] = Features({"text": Value("string" )} )
lowerCAmelCase : ClassVar[Features] = Features({"labels": ClassLabel} )
lowerCAmelCase : str = "text"
lowerCAmelCase : str = "labels"
def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> Optional[int]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] ,_snake_case ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
lowercase__ : int = copy.deepcopy(self )
lowercase__ : Any = self.label_schema.copy()
lowercase__ : Any = features[self.label_column]
lowercase__ : int = label_schema
return task_template
@property
def UpperCAmelCase ( self : Dict ) -> Dict[str, str]:
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
}
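# Hedged usage sketch for the task template above (datasets' TextClassification,
# mangled to `__A`; its align-with-features method is mangled to `UpperCAmelCase`):
#
#   from datasets import ClassLabel, Features, Value
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification(text_column="text", label_column="labels")
#   template = template.align_with_features(features)  # copies the label names into label_schema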
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'spiece.model'}
lowerCAmelCase_ = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : Tuple ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=True ,_snake_case : int=False ,_snake_case : Union[str, Any]="<s>" ,_snake_case : Tuple="</s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : List[str]="<sep>" ,_snake_case : List[str]="<pad>" ,_snake_case : str="<cls>" ,_snake_case : str="<mask>" ,_snake_case : Any=["<eop>", "<eod>"] ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : Optional[int] ,) -> None:
"""simple docstring"""
lowercase__ : str = AddedToken(_snake_case ,lstrip=_snake_case ,rstrip=_snake_case ) if isinstance(_snake_case ,_snake_case ) else mask_token
lowercase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case ,remove_space=_snake_case ,keep_accents=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,pad_token=_snake_case ,cls_token=_snake_case ,mask_token=_snake_case ,additional_special_tokens=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
lowercase__ : Union[str, Any] = 3
lowercase__ : Union[str, Any] = do_lower_case
lowercase__ : Optional[Any] = remove_space
lowercase__ : List[str] = keep_accents
lowercase__ : str = vocab_file
lowercase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
lowercase__ : Optional[int] = jieba
lowercase__ : List[str] = str.maketrans(''' \n''' ,'''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
return len(self.sp_model )
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
lowercase__ : Any = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = self.__dict__.copy()
lowercase__ : str = None
return state
def __setstate__( self : str ,_snake_case : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowercase__ : Union[str, Any] = {}
lowercase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
if self.remove_space:
lowercase__ : int = ''' '''.join(inputs.strip().split() )
else:
lowercase__ : str = inputs
lowercase__ : Optional[int] = outputs.replace('''``''' ,'''"''' ).replace('''\'\'''' ,'''"''' )
if not self.keep_accents:
lowercase__ : Tuple = unicodedata.normalize('''NFKD''' ,_snake_case )
lowercase__ : Union[str, Any] = ''''''.join([c for c in outputs if not unicodedata.combining(_snake_case )] )
if self.do_lower_case:
lowercase__ : Tuple = outputs.lower()
return outputs
def UpperCAmelCase ( self : Dict ,_snake_case : str ) -> List[str]:
"""simple docstring"""
lowercase__ : str = self.preprocess_text(_snake_case )
lowercase__ : Any = self.sp_model.encode(_snake_case ,out_type=_snake_case )
lowercase__ : Union[str, Any] = []
for piece in pieces:
if len(_snake_case ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowercase__ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(_snake_case ,'''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowercase__ : Optional[int] = cur_pieces[1:]
else:
lowercase__ : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_snake_case )
else:
new_pieces.append(_snake_case )
return new_pieces
def UpperCAmelCase ( self : List[Any] ,_snake_case : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.sp_model.PieceToId(_snake_case )
def UpperCAmelCase ( self : Optional[int] ,_snake_case : int ) -> int:
"""simple docstring"""
return self.sp_model.IdToPiece(_snake_case )
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Any = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip()
return out_string
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : Tuple = [self.sep_token_id]
lowercase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase ( self : Optional[int] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
if token_ids_a is not None:
return ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1, 1]
return ([0] * len(_snake_case )) + [1, 1]
def UpperCAmelCase ( self : Tuple ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = [self.sep_token_id]
lowercase__ : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : Tuple = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case ,'''wb''' ) as fi:
lowercase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
def UpperCAmelCase ( self : int ,*_snake_case : int ,**_snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[Any] = super()._decode(*_snake_case ,**_snake_case )
lowercase__ : str = text.replace(''' ''' ,'''''' ).replace('''\u2582''' ,''' ''' ).replace('''\u2583''' ,'''\n''' )
return text
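# Hedged round-trip sketch for the tokenizer above (transformers' CpmTokenizer,
# mangled to `__A`): the ' '/'\n' -> '\u2582'/'\u2583' translation table built in
# __init__ is undone by the _decode override at the end. The checkpoint name is
# taken from the vocab map above:
#
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer.encode("今天天气真好")
#   text = tokenizer.decode(ids)  # '\u2582' -> ' ' and '\u2583' -> '\n' restored here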
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
lowercase__ : List[str] = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
lowercase__ : List[str] = '''The dog is cute and lives in the garden house'''
lowercase__ : int = jnp.array([tokenizer.encode(_snake_case )] )
lowercase__ : Any = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
lowercase__ : Tuple = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
lowercase__ : Optional[Any] = model(_snake_case )['''last_hidden_state''']
self.assertEqual(output.shape ,_snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] ,_snake_case ,atol=1e-3 ) )
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase ) -> List[str]:
lowercase__ : List[Any] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowercase__ : str = [1_44, 1_92, 2_40]
lowercase__ : List[str] = [16, 32, 64, 96, 1_28, 1_60, 6_40]
elif "mobilevit_xs" in mobilevit_name:
lowercase__ : Any = [96, 1_20, 1_44]
lowercase__ : Union[str, Any] = [16, 32, 48, 64, 80, 96, 3_84]
elif "mobilevit_xxs" in mobilevit_name:
lowercase__ : Dict = [64, 80, 96]
lowercase__ : Optional[int] = [16, 16, 24, 48, 64, 80, 3_20]
lowercase__ : Tuple = 0.0_5
lowercase__ : List[Any] = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
lowercase__ : int = 5_12
lowercase__ : str = 16
lowercase__ : int = 21
lowercase__ : Tuple = '''pascal-voc-id2label.json'''
else:
lowercase__ : Dict = 10_00
lowercase__ : int = '''imagenet-1k-id2label.json'''
lowercase__ : str = '''huggingface/label-files'''
lowercase__ : str = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Dict = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Any = idalabel
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
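# convert_state_dict additionally splits each fused qkv projection of the original checkpoint
# into the separate query/key/value tensors expected by the HF attention layers; `dim`
# (all_head_size) gives the row count of each of the three slices.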
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
# We will verify our results on a standard COCO test image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
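# The conversion below is verified numerically: the converted model must reproduce the expected
# logits on the test image above to within an absolute tolerance of 1e-4 before it is saved.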
@torch.no_grad()
def convert_mobilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original checkpoint's weights into our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
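# Example invocation (script name and paths are placeholders):
#   python convert_mobilevit.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small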
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
    convert_mobilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
"""simple docstring"""
from __future__ import annotations
END = '#'


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie('de'))
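# Expected output (assuming CPython's insertion-ordered dicts): ('depart ', 'detergent ',
# 'deer ', 'deal '), i.e. every stored completion of "de"; the trailing space on each entry
# is contributed by the END sentinel in _elements().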
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[str] ,_snake_case : Union[str, Any] ,_snake_case : int="[GO]" ,_snake_case : Any="[GO]" ,_snake_case : Tuple="[s]" ,_snake_case : Optional[Any]="[GO]" ,**_snake_case : Tuple ) -> int:
"""simple docstring"""
super().__init__(
unk_token=_snake_case ,bos_token=_snake_case ,eos_token=_snake_case ,pad_token=_snake_case ,**_snake_case ,)
with open(_snake_case ,encoding='''utf-8''' ) as vocab_handle:
lowercase__ : Dict = json.load(_snake_case )
lowercase__ : Any = {v: k for k, v in self.vocab.items()}
@property
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return len(self.vocab )
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
return dict(self.vocab ,**self.added_tokens_encoder )
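    # MGP-STR models scene text, so tokenization is purely character-level: each character of
    # the input string becomes one token and is looked up directly in the vocabulary.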
def UpperCAmelCase ( self : int ,_snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[Any] = []
for s in text:
char_tokens.extend(_snake_case )
return char_tokens
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return self.vocab.get(_snake_case ,self.vocab.get(self.unk_token ) )
def UpperCAmelCase ( self : str ,_snake_case : int ) -> Any:
"""simple docstring"""
return self.decoder.get(_snake_case )
def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_snake_case ) )
return
lowercase__ : List[str] = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(_snake_case ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab ,indent=2 ,sort_keys=_snake_case ,ensure_ascii=_snake_case ) + '''\n''' )
return (vocab_file,)
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
'''simple docstring'''
def __init__( self : int ,_snake_case : int ,_snake_case : int ,_snake_case : int = 3 ,_snake_case : int = 1 ,_snake_case : int = 1 ,_snake_case : Optional[str] = "relu" ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
        lowercase__ : Tuple = nn.Conv2d(
            _snake_case ,_snake_case ,kernel_size=_snake_case ,stride=_snake_case ,padding=kernel_size // 2 ,groups=_snake_case ,bias=_snake_case ,)
        lowercase__ : List[Any] = nn.BatchNorm2d(_snake_case )
        lowercase__ : Optional[int] = ACT2FN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.convolution(_snake_case )
lowercase__ : Tuple = self.normalization(_snake_case )
lowercase__ : Tuple = self.activation(_snake_case )
return hidden_state
class RegNetEmbeddings(nn.Module):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : RegNetConfig ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
lowercase__ : str = config.num_channels
def UpperCAmelCase ( self : int ,_snake_case : Dict ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
lowercase__ : Optional[int] = self.embedder(_snake_case )
return hidden_state
class RegNetShortCut(nn.Module):
'''simple docstring'''
def __init__( self : str ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ) -> Any:
"""simple docstring"""
super().__init__()
        lowercase__ : List[str] = nn.Conv2d(_snake_case ,_snake_case ,kernel_size=1 ,stride=_snake_case ,bias=_snake_case )
        lowercase__ : Any = nn.BatchNorm2d(_snake_case )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ) -> Tensor:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.convolution(_snake_case )
lowercase__ : Optional[int] = self.normalization(_snake_case )
return hidden_state
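# Squeeze-and-Excitation: a global average pool squeezes each channel to a single value, a
# two-layer 1x1-convolution bottleneck with a sigmoid produces per-channel weights, and the
# input feature map is rescaled channel-wise by those weights.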
class RegNetSELayer(nn.Module):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : int ,_snake_case : int ) -> Dict:
"""simple docstring"""
super().__init__()
        lowercase__ : Any = nn.AdaptiveAvgPool2d((1, 1) )
        lowercase__ : Dict = nn.Sequential(
            nn.Conv2d(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.ReLU() ,nn.Conv2d(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.Sigmoid() ,)
def UpperCAmelCase ( self : int ,_snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.pooler(_snake_case )
lowercase__ : Union[str, Any] = self.attention(_snake_case )
lowercase__ : List[str] = hidden_state * attention
return hidden_state
class RegNetXLayer(nn.Module):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> List[str]:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = in_channels != out_channels or stride != 1
lowercase__ : Optional[int] = max(1 ,out_channels // config.groups_width )
lowercase__ : str = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : Optional[int] = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
        lowercase__ : str = ACT2FN[config.hidden_act]
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = hidden_state
lowercase__ : Union[str, Any] = self.layer(_snake_case )
lowercase__ : List[Any] = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : Optional[int] = self.activation(_snake_case )
return hidden_state
class RegNetYLayer(nn.Module):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = in_channels != out_channels or stride != 1
lowercase__ : List[str] = max(1 ,out_channels // config.groups_width )
lowercase__ : Tuple = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : str = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetSELayer(_snake_case ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
        lowercase__ : Optional[Any] = ACT2FN[config.hidden_act]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : str = hidden_state
lowercase__ : Optional[Any] = self.layer(_snake_case )
lowercase__ : int = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : str = self.activation(_snake_case )
return hidden_state
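# A stage stacks `depth` layers of one type (X or Y, where Y is X plus Squeeze-and-Excitation);
# only the first layer downsamples (stride 2 by default), so spatial resolution is halved at
# most once per stage.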
class RegNetStage(nn.Module):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ,_snake_case : int = 2 ,) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[Any] = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
lowercase__ : Optional[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_snake_case ,_snake_case ,_snake_case ,stride=_snake_case ,) ,*[layer(_snake_case ,_snake_case ,_snake_case ) for _ in range(depth - 1 )] ,)
def UpperCAmelCase ( self : Tuple ,_snake_case : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.layers(_snake_case )
return hidden_state
class RegNetEncoder(nn.Module):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : RegNetConfig ) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : str = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_snake_case ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
lowercase__ : str = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_snake_case ,config.depths[1:] ):
self.stages.append(RegNetStage(_snake_case ,_snake_case ,_snake_case ,depth=_snake_case ) )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ,_snake_case : bool = False ,_snake_case : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ : int = hidden_states + (hidden_state,)
lowercase__ : Any = stage_module(_snake_case )
if output_hidden_states:
lowercase__ : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case ,hidden_states=_snake_case )
class RegNetPreTrainedModel(PreTrainedModel):
'''simple docstring'''
lowerCAmelCase : int = RegNetConfig
lowerCAmelCase : List[Any] = "regnet"
lowerCAmelCase : Optional[int] = "pixel_values"
lowerCAmelCase : Union[str, Any] = True
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
        if isinstance(_snake_case ,nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight ,mode='''fan_out''' ,nonlinearity='''relu''' )
        elif isinstance(_snake_case ,(nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Any=False ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : str = value
lowerCAmelCase_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Any = config
lowercase__ : List[str] = RegNetEmbeddings(_snake_case )
lowercase__ : Any = RegNetEncoder(_snake_case )
        lowercase__ : Dict = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Tensor ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Union[str, Any] = self.embedder(_snake_case )
lowercase__ : List[Any] = self.encoder(
_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : str = encoder_outputs[0]
lowercase__ : Optional[int] = self.pooler(_snake_case )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_snake_case ,pooler_output=_snake_case ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
'''simple docstring'''
def __init__( self : int ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Optional[Any] = config.num_labels
lowercase__ : int = RegNetModel(_snake_case )
# classification head
lowercase__ : str = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[torch.FloatTensor] = None ,_snake_case : Optional[torch.LongTensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : List[Any] = self.regnet(_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : List[str] = outputs.pooler_output if return_dict else outputs[1]
lowercase__ : Union[str, Any] = self.classifier(_snake_case )
lowercase__ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__ : List[Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__ : Dict = '''single_label_classification'''
else:
lowercase__ : Optional[int] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowercase__ : Union[str, Any] = MSELoss()
if self.num_labels == 1:
lowercase__ : List[Any] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
lowercase__ : Tuple = loss_fct(_snake_case ,_snake_case )
elif self.config.problem_type == "single_label_classification":
lowercase__ : Tuple = CrossEntropyLoss()
lowercase__ : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__ : Any = BCEWithLogitsLoss()
lowercase__ : Union[str, Any] = loss_fct(_snake_case ,_snake_case )
if not return_dict:
lowercase__ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states )
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['''name''']
        return f"""{username}/{model_id}"""
    else:
        return f"""{organization}/{model_id}"""
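# e.g. get_full_repo_name("my-model", organization="my-org") returns "my-org/my-model"; without
# an organization, the namespace of the authenticated user (via whoami) is used.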
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.''' )
    if hasattr(args, '''local_rank''') and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, '''hub_token''') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='''en''', license='''apache-2.0''', library_name='''diffusers''', tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, '''dataset_name''') else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, '''gradient_accumulation_steps''') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, '''adam_beta1''') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, '''adam_beta2''') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, '''adam_weight_decay''') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, '''adam_epsilon''') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, '''lr_scheduler''') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, '''lr_warmup_steps''') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, '''ema_inv_gamma''') else None,
        ema_power=args.ema_power if hasattr(args, '''ema_power''') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, '''ema_max_decay''') else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, '''README.md''')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'''snapshots/([^/]+)/''', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
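# Resolved cache paths look like ".../snapshots/<commit_hash>/<file>", which is what the regex
# above recovers; anything that does not look like a valid commit hash is discarded.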
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*'''):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('''.''')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits)
    return weights_name
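# For example: _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin", i.e. the variant is spliced in before the extension.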
def __UpperCAmelCase ( __lowerCamelCase , *,
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , ) -> List[Any]:
lowercase__ : Optional[Any] = str(__lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
return pretrained_model_name_or_path
elif os.path.isdir(__lowerCamelCase ):
if os.path.isfile(os.path.join(__lowerCamelCase , __lowerCamelCase ) ):
# Load from a PyTorch checkpoint
lowercase__ : Optional[Any] = os.path.join(__lowerCamelCase , __lowerCamelCase )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) ):
lowercase__ : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__lowerCamelCase ).base_version ) >= version.parse('''0.20.0''' )
):
try:
lowercase__ : int = hf_hub_download(
__lowerCamelCase , filename=_add_variant(__lowerCamelCase , __lowerCamelCase ) , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , local_files_only=__lowerCamelCase , use_auth_token=__lowerCamelCase , user_agent=__lowerCamelCase , subfolder=__lowerCamelCase , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __lowerCamelCase , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__lowerCamelCase , __lowerCamelCase )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__lowerCamelCase , __lowerCamelCase )}' so that the correct variant file can be added.""" , __lowerCamelCase , )
try:
# 2. Load model file as usual
lowercase__ : str = hf_hub_download(
__lowerCamelCase , filename=__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , local_files_only=__lowerCamelCase , use_auth_token=__lowerCamelCase , user_agent=__lowerCamelCase , subfolder=__lowerCamelCase , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase_ = 1.6021E-19 # units = C
def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError('''Exactly one of conductivity, electron_conc and mobility must be 0''')
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
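# The underlying relation is sigma = n * q * mu, so for example electric_conductivity(25, 0, 1.2)
# solves for the missing electron concentration: 25 / (1.2 * 1.6021e-19), about 1.3e20.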
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]
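# Slicing wins the benchmarks below because s[::-1] builds the reversed copy in C rather than
# looping in Python bytecode.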
def benchmark_function(name: str) -> None:
    stmt = f"""all({name}(key) is value for key, value in test_data.items())"""
    setup = f"""from __main__ import test_data, {name}"""
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"""{name:<35} finished {number:,} runs in {result:.5f} seconds""")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'''{key:21} {value}''')
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = ["pixel_values"]
def __init__( self : Tuple ,_snake_case : bool = True ,_snake_case : Optional[Dict[str, int]] = None ,_snake_case : PILImageResampling = PILImageResampling.BICUBIC ,_snake_case : bool = True ,_snake_case : bool = True ,_snake_case : Union[int, float] = 1 / 255 ,_snake_case : Dict[str, int] = None ,_snake_case : bool = True ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,**_snake_case : Optional[Any] ,) -> None:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : str = size if size is not None else {'''height''': 224, '''width''': 224}
lowercase__ : Optional[int] = get_size_dict(_snake_case )
lowercase__ : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase__ : Optional[int] = get_size_dict(_snake_case ,default_to_square=_snake_case ,param_name='''crop_size''' )
lowercase__ : Tuple = do_resize
lowercase__ : List[Any] = do_rescale
lowercase__ : Any = do_normalize
lowercase__ : List[str] = do_center_crop
lowercase__ : Optional[Any] = crop_size
lowercase__ : Union[str, Any] = size
lowercase__ : Any = resample
lowercase__ : int = rescale_factor
lowercase__ : Tuple = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase__ : str = image_std if image_std is not None else IMAGENET_DEFAULT_STD
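    # The helper methods below implement the individual steps; the final batched entry point
    # applies whichever steps are enabled in a fixed order (resize -> center_crop -> rescale ->
    # normalize) and returns a channels-first BatchFeature.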
def UpperCAmelCase ( self : str ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : PILImageResampling = PILImageResampling.BILINEAR ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Dict ,) -> np.ndarray:
"""simple docstring"""
lowercase__ : List[str] = get_size_dict(_snake_case )
if "shortest_edge" in size:
lowercase__ : str = get_resize_output_image_size(_snake_case ,size=size['''shortest_edge'''] ,default_to_square=_snake_case )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
lowercase__ : int = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : List[Any] ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Tuple ,) -> np.ndarray:
"""simple docstring"""
lowercase__ : Optional[Any] = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_snake_case ,size=(size['''height'''], size['''width''']) ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : np.ndarray ,_snake_case : float ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Optional[int] ) -> np.ndarray:
"""simple docstring"""
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : np.ndarray ,_snake_case : Union[float, List[float]] ,_snake_case : Union[float, List[float]] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Dict ,) -> np.ndarray:
"""simple docstring"""
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : ImageInput ,_snake_case : Optional[bool] = None ,_snake_case : Dict[str, int] = None ,_snake_case : PILImageResampling = None ,_snake_case : bool = None ,_snake_case : int = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[float] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[str, TensorType]] = None ,_snake_case : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**_snake_case : List[str] ,) -> BatchFeature:
"""simple docstring"""
lowercase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : int = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
lowercase__ : Tuple = get_size_dict(_snake_case ,param_name='''crop_size''' ,default_to_square=_snake_case )
lowercase__ : Tuple = resample if resample is not None else self.resample
lowercase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowercase__ : List[str] = image_std if image_std is not None else self.image_std
lowercase__ : Optional[int] = size if size is not None else self.size
lowercase__ : int = get_size_dict(_snake_case )
if not is_batched(_snake_case ):
lowercase__ : Optional[Any] = [images]
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowercase__ : str = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
lowercase__ : int = [self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case ) for image in images]
if do_center_crop:
lowercase__ : str = [self.center_crop(image=_snake_case ,size=_snake_case ) for image in images]
if do_rescale:
lowercase__ : Optional[Any] = [self.rescale(image=_snake_case ,scale=_snake_case ) for image in images]
if do_normalize:
lowercase__ : List[str] = [self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(_snake_case ,_snake_case ) for image in images]
lowercase__ : Any = {'''pixel_values''': images}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"""{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"""
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
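# Worked by hand: jaro_winkler("hello", "world") matches a single character ("l"), has no
# transpositions and no common prefix, giving (1/5 + 1/5 + 1/1) / 3, about 0.4667.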
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
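    # _LazyModule replaces this module in sys.modules, so the heavy torch/vision imports above
    # only execute when one of the exported names is first accessed.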
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
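# Example usage (script name and paths are placeholders):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
# fire.Fire exposes convert()'s parameters as positional CLI arguments and --flags.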
if __name__ == "__main__":
fire.Fire(convert)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    '''simple docstring'''
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
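# Illustrative usage (the checkpoint name is an example; any repo that pairs a
# UNet2DModel with a ScoreSdeVeScheduler should work the same way):
#     pipe = ScoreSdeVePipeline.from_pretrained('''google/ncsnpp-celebahq-256''')
#     image = pipe(num_inference_steps=2000).images[0]
#     image.save('''sde_ve_sample.png''')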
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple('''result''', '''name value''')
    if (voltage, current, power).count(0) != 1:
        raise ValueError('''Only one argument must be 0''')
    elif power < 0:
        raise ValueError(
            '''Power cannot be negative in any electrical/electronics system''')
    elif voltage == 0:
        return result('''voltage''', power / current)
    elif current == 0:
        return result('''current''', power / voltage)
    elif power == 0:
        return result('''power''', float(round(abs(voltage * current), 2)))
    else:
        raise ValueError('''Exactly one argument must be 0''')
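# Worked example (added illustration): with P = V * I, passing voltage=0 asks
# the function to solve V = P / I, so 5 W at 2 A gives 2.5 V; namedtuples
# compare equal to plain tuples, which keeps the check terse.
assert electric_power(voltage=0, current=2, power=5) == ('''voltage''', 2.5)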
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop('''model_type''')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
                f"""Supported model types: {",".join(self.backbones_supported)}""")
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('''model_type''') if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"""Transformer Decoder {decoder_type} not supported, please use one of"""
                    f""" {",".join(self.decoders_supported)}""")
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """simple docstring"""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''decoder_config'''] = self.decoder_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
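# Illustrative usage (relies only on the defaults reconstructed above):
#     config = MaskFormerConfig()  # Swin backbone + DETR decoder by default
#     assert config.backbone_config.model_type == '''swin'''
#     assert config.decoder_config.model_type == '''detr'''
#     config.to_dict()['''model_type''']  # -> '''maskformer'''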
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name) -> FocalNetConfig:
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name) -> str:
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''embeddings.norm''')
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''', '''encoder.stages''')
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''', '''downsample.projection''')
    if "blocks" in name:
        name = name.replace('''blocks''', '''layers''')
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''', '''modulation.projection_in''')
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''', '''modulation.projection_context''')
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''', '''modulation.projection_out''')
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''', '''classifier''')
    else:
        name = '''focalnet.''' + name
    return name
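# Worked example (added illustration), tracing the rules above on one key:
#     rename_key('''layers.0.blocks.0.modulation.f.weight''')
#     # -> '''focalnet.encoder.stages.0.layers.0.modulation.projection_in.weight'''
# ("layers" gains the "encoder." prefix, "encoder.layers" becomes
# "encoder.stages", "blocks" becomes "layers", "modulation.f" becomes
# "modulation.projection_in", and non-head keys get the "focalnet." prefix.)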
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False) -> None:
    # fmt: off
    model_name_to_url = {
        '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
        '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
        '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
        '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
        '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
        '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
        '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
        '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
        '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
        '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''', checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True,
        size={'''shortest_edge''': 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors='''pt''')
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print('''Predicted class:''', model.config.id2label[predicted_class_idx])
    print('''First values of logits:''', outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model and processor of {model_name} to the hub...""")
        model.push_to_hub(f"""{model_name}""")
        processor.push_to_hub(f"""{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
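# Example invocation (illustrative; the script file name and output path are
# assumptions):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted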
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            FutureWarning,
        )
        return self.image_processor_class
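# Illustrative usage (the checkpoint id is an example Chinese-CLIP repo):
#     processor = ChineseCLIPProcessor.from_pretrained('''OFA-Sys/chinese-clip-vit-base-patch16''')
#     batch = processor(text=['''一只猫'''], images=image, return_tensors='''pt''')  # "a cat"
#     sorted(batch.keys())  # includes input_ids, attention_mask and pixel_values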
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    '''simple docstring'''
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs) -> None:
        """simple docstring"""
        requires_backends(self, '''timm''')
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''')
        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
        if hasattr(config, '''out_features''') and config.out_features is not None:
            raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''')
        pretrained = getattr(config, '''use_pretrained_backbone''', None)
        if pretrained is None:
            raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''')
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, '''out_indices''', None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['''module''']: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ['''vision''', '''timm'''])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop('''config''', TimmBackboneConfig())
        use_timm = kwargs.pop('''use_timm_backbone''', True)
        if not use_timm:
            raise ValueError('''use_timm_backbone must be True for timm backbones''')
        num_channels = kwargs.pop('''num_channels''', config.num_channels)
        features_only = kwargs.pop('''features_only''', config.features_only)
        use_pretrained_backbone = kwargs.pop('''use_pretrained_backbone''', config.use_pretrained_backbone)
        out_indices = kwargs.pop('''out_indices''', config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module) -> None:
        """simple docstring"""
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple["Tensor", ...]]:
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError('''Cannot output attentions for timm backbones at the moment''')
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
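# Illustrative usage (assumes timm is installed; '''resnet18''' is an example):
#     config = TimmBackboneConfig(backbone='''resnet18''', use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(pixel_values).feature_maps  # one tensor per out_index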
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> "list[int]":
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
lowercase__ : List[str] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowercase__ : Dict = 1
if upper_limit > 0:
lowercase__ : Tuple = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__lowerCamelCase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
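# Added illustration: C(0..5) = 1, 1, 2, 5, 14, 42; for instance
# C(3) = C(0)C(2) + C(1)C(1) + C(2)C(0) = 2 + 1 + 2 = 5.
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]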
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default="NER" ,metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase : bool = field(default=A_ ,metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,)
lowerCAmelCase : int = field(
default=1_2_8 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
lowercase__ : str = import_module('''tasks''' )
try:
lowercase__ : List[str] = getattr(__lowerCamelCase , model_args.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowercase__ : Union[str, Any] = token_classification_task.get_labels(data_args.labels )
lowercase__ : Dict[int, str] = dict(enumerate(__lowerCamelCase ) )
lowercase__ : Optional[int] = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : List[Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , id2label=__lowerCamelCase , label2id={label: i for i, label in enumerate(__lowerCamelCase )} , cache_dir=model_args.cache_dir , )
lowercase__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowercase__ : str = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__lowerCamelCase , __lowerCamelCase ) -> Tuple[List[int], List[int]]:
lowercase__ : Tuple = np.argmax(__lowerCamelCase , axis=2 )
lowercase__ , lowercase__ : Tuple = preds.shape
lowercase__ : List[str] = [[] for _ in range(__lowerCamelCase )]
lowercase__ : Tuple = [[] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__lowerCamelCase ) -> Dict:
lowercase__ , lowercase__ : List[Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__lowerCamelCase , __lowerCamelCase ),
"precision": precision_score(__lowerCamelCase , __lowerCamelCase ),
"recall": recall_score(__lowerCamelCase , __lowerCamelCase ),
"f1": fa_score(__lowerCamelCase , __lowerCamelCase ),
}
# Data collator
    lowercase__ : Tuple = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
lowercase__ : str = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , compute_metrics=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ : int = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ : Optional[int] = trainer.evaluate()
lowercase__ : Union[str, Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__lowerCamelCase )
# Predict
if training_args.do_predict:
lowercase__ : Optional[int] = TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = trainer.predict(__lowerCamelCase )
lowercase__ , lowercase__ : Tuple = align_predictions(__lowerCamelCase , __lowerCamelCase )
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return results
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
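# Example invocation (illustrative paths and flags; the data dir must hold
# CoNLL-2003-formatted train/dev/test files):
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./data \
#       --output_dir ./ner-out --do_train --do_eval --task_type NER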
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
    ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected) -> None:
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected) -> None:
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected) -> None:
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
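# Intuition for _distribute_shards (restating the parametrized cases above):
# 10 shards over at most 3 jobs split as evenly as possible, with earlier jobs
# absorbing the remainder, hence ranges of sizes 4, 3 and 3:
#     _distribute_shards(num_shards=10, max_num_jobs=3)
#     # -> [range(0, 4), range(4, 7), range(7, 10)]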
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['''idx''', '''sentence1''', '''sentence2'''],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding='''longest''',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='''pt''',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args) -> None:
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
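# How the decorator behaves (illustrative note, based on accelerate's
# documented semantics): `find_executable_batch_size` first calls the wrapped
# function with `starting_batch_size`; if a CUDA out-of-memory error escapes,
# it frees memory, halves the batch size and retries, e.g. 16 -> 8 -> 4 -> ...
# until the call succeeds (or the batch size reaches zero, which raises).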
def main() -> None:
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''',
        type=str,
        default=None,
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''],
        help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''',
    )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding='''utf-8''')
    ciphertext = [int(number) for number in data.strip().split(''',''')]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
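# XOR symmetry (added illustration): encryption and decryption are the same
# operation, so decoding with the correct key recovers the plaintext exactly.
assert try_key([ord(c) ^ ord(k) for c, k in zip('''secret''', cycle('''abc'''))], (ord('''a'''), ord('''b'''), ord('''c'''))) == '''secret'''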
if __name__ == "__main__":
print(F'''{solution() = }''')
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    def test_transform_and_reverse(self) -> None:
        """simple docstring"""
        model_id = '''hf-internal-testing/tiny-random-t5'''
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer('''This is me''', return_tensors='''pt''')
        model = model.to_bettertransformer()
        self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self) -> None:
        """simple docstring"""
        model_id = '''hf-internal-testing/tiny-random-t5'''
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
    parser.add_argument(
        '''--dataset_name''' , type=str , default='''wikitext''' , help='''Name of the dataset. Explore datasets at: hf.co/datasets.''' , )
    parser.add_argument(
        '''--dataset_config''' , type=str , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
    parser.add_argument(
        '''--tokenizer_name_or_path''' , type=str , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
    parser.add_argument(
        '''--shard_size''' , type=int , default=10_00 , help='''Number of entries to go in a single shard.''' , )
    parser.add_argument('''--split''' , type=str , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
    parser.add_argument(
        '''--limit''' , default=None , type=int , help='''Limit the number of shards (used for debugging).''' , )
    parser.add_argument(
        '''--max_length''' , type=int , default=5_12 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
        ''' sequence length that is a multiple of 8.''' , )
    parser.add_argument(
        '''--output_dir''' , default='''tf-tpu''' , type=str , help='''Output directory where the TFRecord shards will be saved. If the'''
        ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
        ''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
    args = parser.parse_args()
return args
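# A minimal sketch of how this script might be invoked; the script file name and
# the bucket name below are assumptions, not taken from this file:
#
#   python prepare_tfrecord_shards.py \
#       --dataset_name wikitext \
#       --dataset_config wikitext-103-raw-v1 \
#       --split train \
#       --shard_size 1000 \
#       --max_length 512 \
#       --output_dir gs://my-tf-tpu-bucket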
def tokenize_function(tokenizer ):
    def fn(examples ):
        return tokenizer(examples['''text'''] )
    return fn
def get_serialized_examples(tokenized_data ):
    records = []
    for i in range(len(tokenized_data['''input_ids'''] ) ):
        features = {
            '''input_ids''': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['''input_ids'''][i] ) ),
            '''attention_mask''': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['''attention_mask'''][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
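# A minimal sketch of reading a shard back, assuming the feature spec mirrors
# get_serialized_examples above (the shard file name below is hypothetical):
#
#   feature_spec = {
#       '''input_ids''': tf.io.VarLenFeature(tf.int64),
#       '''attention_mask''': tf.io.VarLenFeature(tf.int64),
#   }
#   raw_dataset = tf.data.TFRecordDataset(['''dataset-0-1000.tfrecord'''])
#   parsed = raw_dataset.map(lambda record: tf.io.parse_single_example(record, feature_spec))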
def main(args ):
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f"""Limiting the dataset to {args.limit} entries.""" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
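    # Worked example of group_texts with args.max_length = 4:
    #   input batch : {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}
    #   concatenated: [1, 2, 3, 4, 5, 6, 7, 8] -> total_length 8 stays 8
    #   output      : {"input_ids": [[1, 2, 3, 4], [5, 6, 7, 8]]}
    # A trailing remainder shorter than max_length would be dropped.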
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=10_00 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['''input_ids'''] )
        filename = os.path.join(split_dir , f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('''Wrote file {} containing {} records'''.format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(f"""split-{args.split}-records-count.txt""" , '''w''' ) as f:
        print(f"""Total {args.split} records: {total_records}""" , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 302
|
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(f"""Loading PyTorch weights from {pt_path}""" )
        pt_state_dict = torch.load(pt_path , map_location='''cpu''' )
        logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix ) -> (Tuple[str], np.ndarray):
    def is_key_or_prefix_key_in_dict(key ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''mean''',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''var''',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '''_g'''
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '''_v'''
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
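# Illustrative renamings performed above (the keys are hypothetical examples):
#   ("dense", "weight") + 2D tensor  -> ("dense", "kernel"), tensor transposed
#   ("conv", "weight") + 4D tensor   -> ("conv", "kernel"), axes reordered OIHW -> HWIO
#   ("layer_norm", "gamma")          -> ("layer_norm", "weight")
#   ("embed", "weight") matching an embedding table -> ("embed", "embedding")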
def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model ):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params['''params''']
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params['''batch_stats'''] )
        random_flax_state_dict.update(flax_batch_stats )
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split('''.''' ) )
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[('''batch_stats''',) + flax_key] = jnp.asarray(flax_tensor )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key , None )
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[('''params''',) + flax_key] = jnp.asarray(flax_tensor )
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
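# A minimal usage sketch, assuming an instantiated Flax model; the model class
# and checkpoint path are hypothetical examples:
#
#   from transformers import FlaxBertModel
#   import torch
#
#   flax_model = FlaxBertModel.from_pretrained('''bert-base-cased''')
#   pt_state_dict = torch.load('''pytorch_model.bin''' , map_location='''cpu''')
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model)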
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames , flax_model ):
    import torch
    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file )
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
        model_prefix = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params['''params''']
            random_flax_state_dict = flatten_dict(flax_model_params )
            random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params )
        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split('''.''' ) )
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]
            # Correctly rename weight parameters
            flax_key , flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                        f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[('''batch_stats''',) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[('''batch_stats''',) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key , None )
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[('''params''',) + flax_key] = jnp.asarray(flax_tensor )
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def load_flax_checkpoint_in_pytorch_model(model , flax_checkpoint_path ):
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
    logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
    # import correct flax class
    flax_cls = getattr(transformers , '''Flax''' + model.__class__.__name__ )
    # load flax weight dict
    with open(flax_checkpoint_path , '''rb''' ) as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(model , flax_state_dict )
def load_flax_weights_in_pytorch_model(pt_model , flax_state ):
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    flax_state_dict = flatten_dict(flax_state )
    pt_model_dict = pt_model.state_dict()
    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple ) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('''running_mean''',)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('''running_var''',)
        if "batch_stats" in flax_state:
            flax_key = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
        else:
            flax_key = '''.'''.join(flax_key_tuple )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split('''.''' )
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + '''_g'''
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + '''_v'''
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = '''.'''.join(key_components )
                special_pt_names[key_to_check] = key
        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )
    else:
        logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
    if len(missing_keys ) > 0:
        logger.warning(
            f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            ''' use it for predictions and inference.''' )
    else:
        logger.warning(
            f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
            '''If your task is similar to the task the model of the checkpoint was trained on, '''
            f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
    return pt_model
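# A minimal usage sketch of the Flax -> PyTorch direction; the model class and
# checkpoint path are hypothetical examples:
#
#   from transformers import BertModel
#
#   pt_model = BertModel.from_pretrained('''bert-base-cased''')
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model , '''flax_model.msgpack''')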
| 302
| 1
|
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedfileFileSystem ( AbstractArchiveFileSystem ):
    '''simple docstring'''
    root_marker = ""
    protocol: str = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None # compression type in fsspec. ex: "gzip"
    extension: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self , fo: str = "" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , **kwargs ):
        """simple docstring"""
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode='''rb''' , protocol=target_protocol , compression=self.compression , client_kwargs={
                '''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
                '''trust_env''': True, # Enable reading proxy env variables.
                **(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split('''::''' )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('''.''' )]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol( cls , path ):
        """simple docstring"""
        return super()._strip_protocol(path ).lstrip('''/''' )
    def _get_dirs( self ):
        """simple docstring"""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            self.dir_cache = {f['''name''']: f}
    def cat( self , path: str ):
        """simple docstring"""
        return self.file.open().read()
    def _open( self , path: str , mode: str = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        """simple docstring"""
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
        return self.file.open()
class Bz2FileSystem ( BaseCompressedfileFileSystem ):
    '''simple docstring'''
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class GzipFileSystem ( BaseCompressedfileFileSystem ):
    '''simple docstring'''
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class Lz4FileSystem ( BaseCompressedfileFileSystem ):
    '''simple docstring'''
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class XzFileSystem ( BaseCompressedfileFileSystem ):
    '''simple docstring'''
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem ( BaseCompressedfileFileSystem ):
    '''simple docstring'''
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__( self , fo: str , mode: str = "rb" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , block_size: int = DEFAULT_BLOCK_SIZE , **kwargs , ):
        """simple docstring"""
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile:
            '''simple docstring'''
            def __init__( self , file_ ):
                """simple docstring"""
                self._file = file_
            def __enter__( self ):
                """simple docstring"""
                self._file.__enter__()
                return self
            def __exit__( self , *args , **kwargs ):
                """simple docstring"""
                self._file.__exit__(*args , **kwargs )
            def __iter__( self ):
                """simple docstring"""
                return iter(self._file )
            def __next__( self ):
                """simple docstring"""
                return next(self._file )
            def __getattr__( self , attr ):
                """simple docstring"""
                return getattr(self._file , attr )
        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )
        self.file.__enter__ = fixed_enter
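# A minimal usage sketch, assuming these filesystems are registered with fsspec
# under their `protocol` names (the URL below is a hypothetical example of the
# chained-URL form mentioned in the `protocol` comment above):
#
#   import fsspec
#
#   with fsspec.open('''gzip://file.txt::https://foo.bar/file.txt.gz''' , '''rb''') as f:
#       data = f.read()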
| 302
|
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline ( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , value_function: UNet1DModel , unet: UNet1DModel , scheduler: DDPMScheduler , env , ):
        """simple docstring"""
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize( self , x_in , key ):
        """simple docstring"""
        return (x_in - self.means[key]) / self.stds[key]
    def de_normalize( self , x_in , key ):
        """simple docstring"""
        return x_in * self.stds[key] + self.means[key]
    def to_torch( self , x_in ):
        """simple docstring"""
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )
    def reset_x0( self , x_in , cond , act_dim ):
        """simple docstring"""
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        """simple docstring"""
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )['''prev_sample''']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y
    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        """simple docstring"""
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , '''observations''' )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x = randn_tensor(shape , device=self.unet.device )
        x = self.reset_x0(x , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x , y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key='''actions''' )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
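# A minimal usage sketch, assuming a pretrained value-guided checkpoint and a
# d4rl-style environment; the checkpoint id below is hypothetical:
#
#   pipeline = ValueGuidedRLPipeline.from_pretrained('''some-org/hopper-value-pipeline''' , env=env)
#   obs = env.reset()
#   action = pipeline(obs , planning_horizon=32 , n_guide_steps=2 , scale=0.1)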
| 302
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline ( DiffusionPipeline ):
    '''simple docstring'''
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet: UNet2DModel , scheduler: ScoreSdeVeScheduler ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 2_000 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
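# A minimal usage sketch; the checkpoint id below is a public NCSN++ checkpoint,
# used here only as an example:
#
#   sde_ve = ScoreSdeVePipeline.from_pretrained('''google/ncsnpp-church-256''')
#   image = sde_ve(num_inference_steps=2_000).images[0]
#   image.save('''sde_ve_sample.png''')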
| 302
|
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Mauve ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/krishnap25/mauve''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/krishnap25/mauve'''] ,reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] ,)
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1_024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
        """simple docstring"""
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
| 302
| 1
|
"""simple docstring"""
def infix_2_postfix(infix ):
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    } # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ) , '''Stack'''.center(print_width ) , '''Postfix'''.center(print_width ) , sep=''' | ''' , )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x ) # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x ) # if x is "(" push to Stack
        elif x == ")": # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x ) # If stack is empty, push x to stack
            else: # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() ) # pop stack & add to Postfix
                stack.append(x ) # push x to stack
        print(
            x.center(8 ) , (''.join(stack )).ljust(print_width ) , (''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , ) # Output in tabular format
    while len(stack ) > 0: # while stack is not empty
        post_fix.append(stack.pop() ) # pop stack & add to Postfix
        print(
            ''' '''.center(8 ) , (''.join(stack )).ljust(print_width ) , (''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , ) # Output in tabular format
    return "".join(post_fix ) # return Postfix as str
def infix_2_prefix(infix ):
    infix = list(infix[::-1] ) # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ''')''' # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '''(''' # change ")" to "("
    return (infix_2_postfix(''.join(infix ) ))[
        ::-1
    ] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ') # Input an Infix equation
    Infix = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 302
|
"""simple docstring"""
import math
def decimal_to_octal(num ) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 ) # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"""0o{int(octal )}"""
def main() -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
| 302
| 1
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFViTModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def test_graph_mode_with_inputs_embeds( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
| 302
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'openmmlab/upernet-convnext-tiny',
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = 'UperNetConfig'
class UperNetConvModule ( nn.Module ):
    '''simple docstring'''
    def __init__( self , in_channels: int , out_channels: int , kernel_size: Union[int, Tuple[int, int]] , padding: Union[int, Tuple[int, int], str] = 0 , bias: bool = False , dilation: Union[int, Tuple[int, int]] = 1 , ) -> None:
        """simple docstring"""
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
        self.batch_norm = nn.BatchNorm2d(out_channels )
        self.activation = nn.ReLU()
    def forward( self , input: torch.Tensor ) -> torch.Tensor:
        """simple docstring"""
        output = self.conv(input )
        output = self.batch_norm(output )
        output = self.activation(output )
        return output
class UperNetPyramidPoolingBlock ( nn.Module ):
    '''simple docstring'''
    def __init__( self , pool_scale: int , in_channels: int , channels: int ) -> None:
        """simple docstring"""
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale ),
            UperNetConvModule(in_channels , channels , kernel_size=1 ),
        ]
        for i, layer in enumerate(self.layers ):
            self.add_module(str(i ) , layer )
    def forward( self , input: torch.Tensor ) -> torch.Tensor:
        """simple docstring"""
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class UperNetPyramidPoolingModule ( nn.Module ):
    '''simple docstring'''
    def __init__( self , pool_scales: Tuple[int, ...] , in_channels: int , channels: int , align_corners: bool ) -> None:
        """simple docstring"""
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales ):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
            self.blocks.append(block )
            self.add_module(str(i ) , block )
    def forward( self , x: torch.Tensor ) -> List[torch.Tensor]:
        """simple docstring"""
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x )
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
            ppm_outs.append(upsampled_ppm_out )
        return ppm_outs
class UperNetHead ( nn.Module ):
    '''simple docstring'''
    def __init__( self , config , in_channels ):
        """simple docstring"""
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]: # skip the top layer
            l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(l_conv )
            self.fpn_convs.append(fpn_conv )
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def init_weights( self ):
        """simple docstring"""
        self.apply(self._init_weights )
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward( self , inputs ):
        """simple docstring"""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
        return output
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_snake_case ) )
# build top-down path
lowercase__ : List[Any] = len(_snake_case )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Union[str, Any] = laterals[i - 1].shape[2:]
lowercase__ : int = laterals[i - 1] + nn.functional.interpolate(
laterals[i] ,size=_snake_case ,mode='''bilinear''' ,align_corners=self.align_corners )
# build outputs
lowercase__ : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Any = nn.functional.interpolate(
fpn_outs[i] ,size=fpn_outs[0].shape[2:] ,mode='''bilinear''' ,align_corners=self.align_corners )
lowercase__ : Any = torch.cat(_snake_case ,dim=1 )
lowercase__ : Any = self.fpn_bottleneck(_snake_case )
lowercase__ : str = self.classifier(_snake_case )
return output
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : List[Any] ,_snake_case : int = 2 ,_snake_case : int = 3 ,_snake_case : Union[int, Tuple[int, int]] = 1 ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : int = config
lowercase__ : Dict = config.auxiliary_in_channels
lowercase__ : Optional[int] = config.auxiliary_channels
lowercase__ : List[Any] = config.auxiliary_num_convs
lowercase__ : List[Any] = config.auxiliary_concat_input
lowercase__ : str = in_index
lowercase__ : Any = (kernel_size // 2) * dilation
lowercase__ : Optional[Any] = []
convs.append(
UperNetConvModule(
self.in_channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
if self.num_convs == 0:
lowercase__ : List[str] = nn.Identity()
else:
lowercase__ : Dict = nn.Sequential(*_snake_case )
if self.concat_input:
lowercase__ : int = UperNetConvModule(
self.in_channels + self.channels ,self.channels ,kernel_size=_snake_case ,padding=kernel_size // 2 )
lowercase__ : List[str] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.apply(self._init_weights )
def UpperCAmelCase ( self : List[Any] ,_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : str = encoder_hidden_states[self.in_index]
lowercase__ : List[str] = self.convs(_snake_case )
if self.concat_input:
lowercase__ : Any = self.conv_cat(torch.cat([hidden_states, output] ,dim=1 ) )
lowercase__ : Dict = self.classifier(_snake_case )
return output
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Any = UperNetConfig
lowerCAmelCase : str = "pixel_values"
lowerCAmelCase : Dict = True
def UpperCAmelCase ( self : int ,_snake_case : str ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def UpperCAmelCase ( self : int ,_snake_case : str ,_snake_case : str=False ) -> List[str]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[Any] = value
lowerCAmelCase_ = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." ,A_ ,)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Tuple ) -> int:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : int = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowercase__ : Any = UperNetHead(_snake_case ,in_channels=self.backbone.channels )
lowercase__ : str = UperNetFCNHead(_snake_case ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=_snake_case ,config_class=_CONFIG_FOR_DOC )
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,) -> Union[tuple, SemanticSegmenterOutput]:
"""simple docstring"""
lowercase__ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Any = output_attentions if output_attentions is not None else self.config.output_attentions
lowercase__ : Optional[Any] = self.backbone.forward_with_filtered_kwargs(
_snake_case ,output_hidden_states=_snake_case ,output_attentions=_snake_case )
lowercase__ : Optional[int] = outputs.feature_maps
lowercase__ : Tuple = self.decode_head(_snake_case )
lowercase__ : Optional[int] = nn.functional.interpolate(_snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case )
lowercase__ : List[str] = None
if self.auxiliary_head is not None:
lowercase__ : str = self.auxiliary_head(_snake_case )
lowercase__ : Dict = nn.functional.interpolate(
_snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case )
lowercase__ : Any = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
lowercase__ : Union[str, Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case )
lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowercase__ : Tuple = (logits,) + outputs[1:]
else:
lowercase__ : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
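# A minimal usage sketch for the segmentation model above, assuming the public
# transformers names (AutoImageProcessor, UperNetForSemanticSegmentation) and
# the "openmmlab/upernet-convnext-tiny" checkpoint; guarded so nothing runs on
# import.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    inputs = processor(images=Image.new("RGB", (512, 512)), return_tensors="pt")
    # logits are upsampled back to the input resolution in forward()
    logits = model(**inputs).logits  # (batch, num_labels, height, width)
    print(logits.shape)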
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = ["image_processor", "tokenizer"]
lowerCAmelCase : int = "ChineseCLIPImageProcessor"
lowerCAmelCase : str = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Tuple ,_snake_case : str=None ,_snake_case : Union[str, Any]=None ,**_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,_snake_case ,)
lowercase__ : Tuple = kwargs.pop('''feature_extractor''' )
lowercase__ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case ,_snake_case )
lowercase__ : List[Any] = self.image_processor
def __call__( self : List[Any] ,_snake_case : Optional[int]=None ,_snake_case : Dict=None ,_snake_case : List[Any]=None ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase__ : str = self.tokenizer(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if images is not None:
lowercase__ : str = self.image_processor(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if text is not None and images is not None:
lowercase__ : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) ,tensor_type=_snake_case )
def UpperCAmelCase ( self : Any ,*_snake_case : List[Any] ,**_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ,*_snake_case : Tuple ,**_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@property
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.tokenizer.model_input_names
lowercase__ : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,_snake_case ,)
return self.image_processor_class
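# A minimal usage sketch for the processor above, assuming the public
# ChineseCLIPProcessor name and the "OFA-Sys/chinese-clip-vit-base-patch16"
# checkpoint; guarded so nothing runs on import.
if __name__ == "__main__":
    from PIL import Image
    from transformers import ChineseCLIPProcessor

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    batch = processor(text=["一只猫"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
    # text goes through the tokenizer, images through the image processor, and
    # the merged BatchEncoding carries input_ids and pixel_values side by side
    print(sorted(batch.keys()))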
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCAmelCase_ = _symbol_database.Default()
lowerCAmelCase_ = _descriptor_pool.Default().AddSerializedFile(
    B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. '
    B'\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
lowerCAmelCase_ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowerCAmelCase_ = None
lowerCAmelCase_ = B'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowerCAmelCase_ = 45
lowerCAmelCase_ = 1_581
lowerCAmelCase_ = 1_517
lowerCAmelCase_ = 1_570
lowerCAmelCase_ = 1_584
lowerCAmelCase_ = 1_793
lowerCAmelCase_ = 1_795
lowerCAmelCase_ = 1_916
lowerCAmelCase_ = 1_864
lowerCAmelCase_ = 1_905
lowerCAmelCase_ = 1_919
lowerCAmelCase_ = 2_429
lowerCAmelCase_ = 2_208
lowerCAmelCase_ = 2_418
lowerCAmelCase_ = 2_323
lowerCAmelCase_ = 2_407
# @@protoc_insertion_point(module_scope)
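# The builder calls above inject the generated message classes (ModelProto,
# TrainerSpec, ...) into this module's globals. A sketch of parsing a
# sentencepiece model with them, assuming the generated ModelProto name and a
# local "tokenizer.model" file; guarded so nothing runs on import.
if __name__ == "__main__":
    proto = ModelProto()  # noqa: F821 -- name is created at runtime by the builder
    with open("tokenizer.model", "rb") as f:
        proto.ParseFromString(f.read())
    print(len(proto.pieces), proto.trainer_spec.vocab_size)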
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
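# Note on the pattern above: the _LazyModule is installed in sys.modules in
# place of this package, so the heavy submodules load only on first attribute
# access. A sketch of the effect, assuming the usual transformers layout:
#
#     import transformers.models.roberta as roberta  # cheap, nothing imported yet
#     model_cls = roberta.RobertaModel               # now modeling_roberta is imported
#
# The TYPE_CHECKING branch gives static type checkers the real imports without
# paying that cost at runtime.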
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from statistics import mean
import numpy as np
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> list:
lowercase__ : str = 0
# Number of processes finished
lowercase__ : List[Any] = 0
    # Completion flag for each process: 0 = not yet executed, 1 = finished.
    lowercase__ : int = [0] * no_of_process
    # Turn-around time computed for each process.
    lowercase__ : Dict = [0] * no_of_process
# Sort by arrival time.
lowercase__ : str = [burst_time[i] for i in np.argsort(__lowerCamelCase )]
lowercase__ : Tuple = [process_name[i] for i in np.argsort(__lowerCamelCase )]
arrival_time.sort()
while no_of_process > finished_process_count:
lowercase__ : Optional[Any] = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
lowercase__ : Tuple = arrival_time[i]
lowercase__ : Tuple = 0
# Index showing the location of the process being performed
lowercase__ : Any = 0
# Saves the current response ratio.
lowercase__ : List[str] = 0
for i in range(0 , __lowerCamelCase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
lowercase__ : int = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
lowercase__ : List[Any] = temp
lowercase__ : Union[str, Any] = i
# Calculate the turn around time
lowercase__ : Optional[Any] = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
lowercase__ : Union[str, Any] = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> list:
lowercase__ : Optional[int] = [0] * no_of_process
for i in range(0 , __lowerCamelCase ):
lowercase__ : str = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
lowerCAmelCase_ = 5
lowerCAmelCase_ = ['A', 'B', 'C', 'D', 'E']
lowerCAmelCase_ = [1, 2, 3, 4, 5]
lowerCAmelCase_ = [1, 2, 3, 4, 5]
lowerCAmelCase_ = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCAmelCase_ = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
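# Worked example of the selection rule used above: at current_time = 9, a ready
# process that arrived at t = 3 with burst time 3 has response ratio
# ((9 - 3) + 3) / 3 = 3.0. HRRN always dispatches the ready process with the
# highest such ratio, which keeps long jobs from starving.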
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
lowercase__ : List[str] = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
lowercase__ : List[str] = '''The dog is cute and lives in the garden house'''
lowercase__ : int = jnp.array([tokenizer.encode(_snake_case )] )
lowercase__ : Any = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
lowercase__ : Tuple = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
lowercase__ : Optional[Any] = model(_snake_case )['''last_hidden_state''']
self.assertEqual(output.shape ,_snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] ,_snake_case ,atol=1e-3 ) )
"""simple docstring"""
lowerCAmelCase_ = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
lowerCAmelCase_ = frozenset(['prompt', 'negative_prompt'])
lowerCAmelCase_ = frozenset([])
lowerCAmelCase_ = frozenset(['image'])
lowerCAmelCase_ = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
lowerCAmelCase_ = frozenset(['image'])
lowerCAmelCase_ = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
lowerCAmelCase_ = frozenset(['prompt', 'image', 'negative_prompt'])
lowerCAmelCase_ = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
lowerCAmelCase_ = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
lowerCAmelCase_ = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
lowerCAmelCase_ = frozenset(['image', 'mask_image'])
lowerCAmelCase_ = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
lowerCAmelCase_ = frozenset(['example_image', 'image', 'mask_image'])
lowerCAmelCase_ = frozenset(['class_labels'])
lowerCAmelCase_ = frozenset(['class_labels'])
lowerCAmelCase_ = frozenset(['batch_size'])
lowerCAmelCase_ = frozenset([])
lowerCAmelCase_ = frozenset(['batch_size'])
lowerCAmelCase_ = frozenset([])
lowerCAmelCase_ = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
lowerCAmelCase_ = frozenset(['prompt', 'negative_prompt'])
lowerCAmelCase_ = frozenset(['input_tokens'])
lowerCAmelCase_ = frozenset(['input_tokens'])
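# Usage note (hedged): in the diffusers source these frozensets carry names
# such as TEXT_TO_IMAGE_PARAMS and TEXT_TO_IMAGE_BATCH_PARAMS (names assumed
# here), and pipeline test classes point the shared mixins at them, e.g.:
#
#     class MyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
#         params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
#         batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
#
# so the common tests know which __call__ arguments each pipeline accepts.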
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase_ = '#'
class __A :
'''simple docstring'''
def __init__( self : str ) -> None:
"""simple docstring"""
lowercase__ : dict = {}
def UpperCAmelCase ( self : List[str] ,_snake_case : str ) -> None:
"""simple docstring"""
lowercase__ : str = self._trie
for char in text:
if char not in trie:
lowercase__ : Union[str, Any] = {}
lowercase__ : Optional[Any] = trie[char]
lowercase__ : Dict = True
def UpperCAmelCase ( self : Tuple ,_snake_case : str ) -> tuple | list:
"""simple docstring"""
lowercase__ : Optional[Any] = self._trie
for char in prefix:
if char in trie:
lowercase__ : Union[str, Any] = trie[char]
else:
return []
return self._elements(_snake_case )
def UpperCAmelCase ( self : List[str] ,_snake_case : dict ) -> tuple:
"""simple docstring"""
lowercase__ : str = []
for c, v in d.items():
lowercase__ : List[Any] = [''' '''] if c == END else [(c + s) for s in self._elements(_snake_case )]
result.extend(_snake_case )
return tuple(_snake_case )
lowerCAmelCase_ = Trie()
lowerCAmelCase_ = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def __UpperCAmelCase ( __lowerCamelCase ) -> tuple:
lowercase__ : List[Any] = trie.find_word(__lowerCamelCase )
return tuple(string + word for word in suffixes )
def __UpperCAmelCase ( ) -> None:
print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
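# Worked trace: with the words inserted above, autocomplete_using_trie("de")
# walks 'd' -> 'e' and collects every suffix below that node, yielding
# ('depart ', 'detergent ', 'deer ', 'deal ') in insertion order; each
# completion keeps the trailing space contributed by the END sentinel.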
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __A ( A_ ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> int:
"""simple docstring"""
lowercase__ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case ,'''tf_padding''' ) )
self.parent.assertTrue(hasattr(_snake_case ,'''depth_multiplier''' ) )
class __A :
'''simple docstring'''
def __init__( self : Any ,_snake_case : Tuple ,_snake_case : List[str]=13 ,_snake_case : Dict=3 ,_snake_case : List[str]=32 ,_snake_case : Optional[Any]=0.25 ,_snake_case : Dict=8 ,_snake_case : Any=8 ,_snake_case : str=6 ,_snake_case : str=32 ,_snake_case : Tuple=True ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Dict="relu6" ,_snake_case : Tuple=1_280 ,_snake_case : Optional[Any]=0.1 ,_snake_case : Union[str, Any]=0.02 ,_snake_case : int=True ,_snake_case : str=True ,_snake_case : Tuple=10 ,_snake_case : str=None ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : str = parent
lowercase__ : List[str] = batch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : int = image_size
lowercase__ : str = depth_multiplier
lowercase__ : List[Any] = depth_divisible_by
lowercase__ : Union[str, Any] = min_depth
lowercase__ : Any = expand_ratio
lowercase__ : Optional[int] = tf_padding
lowercase__ : List[str] = output_stride
lowercase__ : List[str] = first_layer_is_expansion
lowercase__ : Optional[int] = finegrained_output
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
lowercase__ : Any = classifier_dropout_prob
lowercase__ : Dict = use_labels
lowercase__ : Dict = is_training
lowercase__ : str = num_labels
lowercase__ : Optional[int] = initializer_range
lowercase__ : str = scope
def UpperCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
lowercase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,depth_divisible_by=self.depth_divisible_by ,min_depth=self.min_depth ,expand_ratio=self.expand_ratio ,output_stride=self.output_stride ,first_layer_is_expansion=self.first_layer_is_expansion ,finegrained_output=self.finegrained_output ,hidden_act=self.hidden_act ,tf_padding=self.tf_padding ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Union[str, Any] ,_snake_case : Tuple ,_snake_case : int ,_snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Tuple = MobileNetVaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : int = model(_snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
self.parent.assertEqual(
result.pooler_output.shape ,(self.batch_size, self.last_hidden_size) ,)
def UpperCAmelCase ( self : Dict ,_snake_case : List[Any] ,_snake_case : str ,_snake_case : str ,_snake_case : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = self.num_labels
lowercase__ : str = MobileNetVaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Any ,_snake_case : Union[str, Any] ,_snake_case : Optional[Any] ,_snake_case : str ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.num_labels
lowercase__ : Optional[int] = MobileNetVaForSemanticSegmentation(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[str] = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
lowercase__ : Dict = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Tuple = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase : Any = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Tuple = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Tuple = MobileNetVaModelTester(self )
lowercase__ : Any = MobileNetVaConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : List[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : int ,_snake_case : int ):
lowercase__ : Tuple = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = 16
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Tuple = MobileNetVaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def UpperCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
lowercase__ : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(_snake_case )
lowercase__ : List[str] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(**_snake_case )
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[Any] = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
lowercase__ : Optional[Any] = model.to(_snake_case )
lowercase__ : List[Any] = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
lowercase__ : Tuple = prepare_img()
lowercase__ : str = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
lowercase__ : Any = outputs.logits
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape ,_snake_case )
lowercase__ : List[Any] = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] ,device=_snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_snake_case ,atol=1e-4 ) )
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase_ = logging.get_logger(__name__)
# General docstring
lowerCAmelCase_ = 'RegNetConfig'
# Base docstring
lowerCAmelCase_ = 'facebook/regnet-y-040'
lowerCAmelCase_ = [1, 1_088, 7, 7]
# Image classification docstring
lowerCAmelCase_ = 'facebook/regnet-y-040'
lowerCAmelCase_ = 'tabby, tabby cat'
lowerCAmelCase_ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : int ,_snake_case : int ,_snake_case : int ,_snake_case : int = 3 ,_snake_case : int = 1 ,_snake_case : int = 1 ,_snake_case : Optional[str] = "relu" ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = nn.Convad(
_snake_case ,_snake_case ,kernel_size=_snake_case ,stride=_snake_case ,padding=kernel_size // 2 ,groups=_snake_case ,bias=_snake_case ,)
lowercase__ : List[Any] = nn.BatchNormad(_snake_case )
lowercase__ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.convolution(_snake_case )
lowercase__ : Tuple = self.normalization(_snake_case )
lowercase__ : Tuple = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : RegNetConfig ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
lowercase__ : str = config.num_channels
def UpperCAmelCase ( self : int ,_snake_case : Dict ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
lowercase__ : Optional[int] = self.embedder(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : str ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ) -> Any:
"""simple docstring"""
super().__init__()
lowercase__ : List[str] = nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ,stride=_snake_case ,bias=_snake_case )
lowercase__ : Any = nn.BatchNormad(_snake_case )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ) -> Tensor:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.convolution(_snake_case )
lowercase__ : Optional[int] = self.normalization(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : int ,_snake_case : int ) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ : Any = nn.AdaptiveAvgPoolad((1, 1) )
lowercase__ : Dict = nn.Sequential(
nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.Sigmoid() ,)
def UpperCAmelCase ( self : int ,_snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.pooler(_snake_case )
lowercase__ : Union[str, Any] = self.attention(_snake_case )
lowercase__ : List[str] = hidden_state * attention
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> List[str]:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = in_channels != out_channels or stride != 1
lowercase__ : Optional[int] = max(1 ,out_channels // config.groups_width )
lowercase__ : str = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : Optional[int] = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
lowercase__ : str = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = hidden_state
lowercase__ : Union[str, Any] = self.layer(_snake_case )
lowercase__ : List[Any] = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : Optional[int] = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = in_channels != out_channels or stride != 1
lowercase__ : List[str] = max(1 ,out_channels // config.groups_width )
lowercase__ : Tuple = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : str = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetSELayer(_snake_case ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
lowercase__ : Optional[Any] = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : str = hidden_state
lowercase__ : Optional[Any] = self.layer(_snake_case )
lowercase__ : int = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : str = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ,_snake_case : int = 2 ,) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[Any] = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
lowercase__ : Optional[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_snake_case ,_snake_case ,_snake_case ,stride=_snake_case ,) ,*[layer(_snake_case ,_snake_case ,_snake_case ) for _ in range(depth - 1 )] ,)
def UpperCAmelCase ( self : Tuple ,_snake_case : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.layers(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : RegNetConfig ) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : str = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_snake_case ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
lowercase__ : str = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_snake_case ,config.depths[1:] ):
self.stages.append(RegNetStage(_snake_case ,_snake_case ,_snake_case ,depth=_snake_case ) )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ,_snake_case : bool = False ,_snake_case : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ : int = hidden_states + (hidden_state,)
lowercase__ : Any = stage_module(_snake_case )
if output_hidden_states:
lowercase__ : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case ,hidden_states=_snake_case )
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = RegNetConfig
lowerCAmelCase : List[Any] = "regnet"
lowerCAmelCase : Optional[int] = "pixel_values"
lowerCAmelCase : Union[str, Any] = True
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode='''fan_out''' ,nonlinearity='''relu''' )
elif isinstance(_snake_case ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Any=False ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : str = value
lowerCAmelCase_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Any = config
lowercase__ : List[str] = RegNetEmbeddings(_snake_case )
lowercase__ : Any = RegNetEncoder(_snake_case )
lowercase__ : Dict = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Tensor ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Union[str, Any] = self.embedder(_snake_case )
lowercase__ : List[Any] = self.encoder(
_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : str = encoder_outputs[0]
lowercase__ : Optional[int] = self.pooler(_snake_case )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_snake_case ,pooler_output=_snake_case ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __A ( A_ ):
'''simple docstring'''
def __init__( self : int ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Optional[Any] = config.num_labels
lowercase__ : int = RegNetModel(_snake_case )
# classification head
lowercase__ : str = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[torch.FloatTensor] = None ,_snake_case : Optional[torch.LongTensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : List[Any] = self.regnet(_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : List[str] = outputs.pooler_output if return_dict else outputs[1]
lowercase__ : Union[str, Any] = self.classifier(_snake_case )
lowercase__ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__ : List[Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__ : Dict = '''single_label_classification'''
else:
lowercase__ : Optional[int] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowercase__ : Union[str, Any] = MSELoss()
if self.num_labels == 1:
lowercase__ : List[Any] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
lowercase__ : Tuple = loss_fct(_snake_case ,_snake_case )
elif self.config.problem_type == "single_label_classification":
lowercase__ : Tuple = CrossEntropyLoss()
lowercase__ : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__ : Any = BCEWithLogitsLoss()
lowercase__ : Union[str, Any] = loss_fct(_snake_case ,_snake_case )
if not return_dict:
lowercase__ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states )
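# A minimal classification sketch for the model above, assuming the public
# transformers names (AutoImageProcessor, RegNetForImageClassification) and the
# "facebook/regnet-y-040" checkpoint referenced in the docstring constants;
# guarded so nothing runs on import.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, RegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(images=Image.new("RGB", (224, 224)), return_tensors="pt")
    predicted = model(**inputs).logits.argmax(-1).item()
    print(model.config.id2label[predicted])  # an ImageNet class name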
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = "cpu" , __lowerCamelCase = None ) -> None:
lowercase__ : List[str] = torch.load(__lowerCamelCase , map_location=__lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCamelCase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
lowercase__ : List[Any] = v.half()
if save_path is None: # overwrite src_path
lowercase__ : Any = src_path
torch.save(__lowerCamelCase , __lowerCamelCase )
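# Added round-trip check (illustrative, not part of the original script):
# converting a tiny state dict in place should leave fp16 tensors behind.
def _demo_fp16_roundtrip() -> None:
    import os
    import tempfile

    src = os.path.join(tempfile.mkdtemp(), "tiny_state_dict.bin")
    torch.save({"w": torch.ones(2, 2)}, src)
    convert(src)  # save_path defaults to None, so src is overwritten
    assert torch.load(src)["w"].dtype == torch.float16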
if __name__ == "__main__":
fire.Fire(convert)
| 302
|
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def carrier_concentration(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
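# Added worked example (values are illustrative, not from the original): with
# sigma = n * e * mu, a conductivity of 5.12 S/m, a mobility of 0.01 m^2/(V*s)
# and a zero electron concentration ask the function to solve for n.
def _example() -> None:
    name, value = carrier_concentration(conductivity=5.12, electron_conc=0, mobility=0.01)
    assert name == "electron_conc"
    assert abs(value - 5.12 / (0.01 * ELECTRON_CHARGE)) < 1e-9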
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302
| 1
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
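# Added sanity check (not in the original): the multiples of 3 or 5 below 10
# are 3, 5, 6 and 9, which sum to 23.
assert solution(10) == 23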
if __name__ == "__main__":
print(F'''{solution() = }''')
| 302
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __A(BaseImageProcessor):  # the original model-specific class name is not recoverable from this dump
    '''simple docstring'''

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
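# Added usage sketch (illustrative; the processor class keeps its obfuscated
# name `__A` from above because the original model name is not recoverable).
# A random HWC uint8 image is resized, center-cropped, rescaled and normalized,
# ending up as a single CHW array in the batch.
def _demo_preprocess() -> None:
    image_processor = __A()
    image = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
    batch = image_processor(image)
    assert batch["pixel_values"][0].shape == (3, 224, 224)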
| 302
| 1
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    '''simple docstring'''

    def __init__(self, length: int = 101):
        """simple docstring"""
        self.length = length

    def __len__(self):
        """simple docstring"""
        return self.length

    def __getitem__(self, i) -> int:
        """simple docstring"""
        return i


class DummyDataCollator:
    '''simple docstring'''

    def __call__(self, features):
        """simple docstring"""
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    '''simple docstring'''

    def __init__(self):
        """simple docstring"""
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        """simple docstring"""
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    '''simple docstring'''

    @require_torch_neuroncore
    def test_trainer(self):
        """simple docstring"""
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    '''simple docstring'''

    @require_torch_multi_gpu
    def test_trainer(self):
        """simple docstring"""
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 302
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
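# Added import sketch (assumes an installed transformers that ships this
# module): the lazy module resolves attributes on first access.
def _demo_lazy_import() -> None:
    from transformers.models.efficientnet import EfficientNetConfig

    assert EfficientNetConfig().model_type == "efficientnet"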
| 302
| 1
|
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main() -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
| 302
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = "cpu" , __lowerCamelCase = None ) -> None:
lowercase__ : List[str] = torch.load(__lowerCamelCase , map_location=__lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCamelCase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
lowercase__ : List[Any] = v.half()
if save_path is None: # overwrite src_path
lowercase__ : Any = src_path
torch.save(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
| 302
| 1
|
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    '''simple docstring'''

    def assertListAlmostEqual(self, list1, list2, tol):
        """simple docstring"""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        """simple docstring"""
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        """simple docstring"""
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
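# Added standalone sketch (mirrors the first test above; values illustrative):
# each call adds element-wise into the running gradient buffer.
def _demo_accumulator() -> None:
    accumulator = GradientAccumulator()
    accumulator([tf.constant([1.0, 2.0])])
    accumulator([tf.constant([-2.0, 1.0])])
    assert accumulator.step == 2
    assert accumulator.gradients[0].numpy().tolist() == [-1.0, 3.0]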
| 302
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    '''simple docstring'''

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
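# Added end-to-end sketch (the UNet config here is an assumption for
# illustration; two inference steps only exercise the loop, they do not
# produce useful samples). Nothing runs on import.
def _demo_sde_ve_pipeline() -> None:
    unet = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=1,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = ScoreSdeVePipeline(unet=unet, scheduler=ScoreSdeVeScheduler())
    images = pipe(batch_size=1, num_inference_steps=2).images
    assert len(images) == 1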
| 302
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 302
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        """simple docstring"""
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """simple docstring"""
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
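# Added round-trip sketch (relies on the default Swin/DETR fallbacks above):
def _demo_maskformer_config() -> None:
    config = MaskFormerConfig()
    serialized = config.to_dict()
    assert serialized["backbone_config"]["model_type"] == "swin"
    assert serialized["decoder_config"]["model_type"] == "detr"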
| 302
| 1
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
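# Added sanity sketch (env values are illustrative): even with both flags set,
# the check still requires the `smdistributed` package to be importable.
def _demo_sagemaker_check() -> None:
    os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
    os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
    expected = importlib.util.find_spec("smdistributed") is not None
    assert is_sagemaker_model_parallel_available() == expected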
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    '''simple docstring'''

    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        """simple docstring"""
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        """simple docstring"""
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device

    @property
    def world_size(self):
        """simple docstring"""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        """simple docstring"""
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        """simple docstring"""
        return False
| 302
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
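# Example invocation (added; the script file name and paths are illustrative):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted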
| 302
| 1
|
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
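# Added bare-loop sketch of what the tests above exercise (a zero tensor
# stands in for a real UNet's noise prediction; config values illustrative).
def _demo_euler_loop() -> None:
    scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(5)
    generator = torch.manual_seed(0)
    sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample, t)
        model_output = torch.zeros_like(scaled)
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample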
| 302
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
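# Added loading sketch (the checkpoint name is an assumption for illustration;
# it is the public Chinese-CLIP base checkpoint on the Hub):
def _demo_processor() -> None:
    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    inputs = processor(text=["a photo of a cat"], return_tensors="pt")
    assert "input_ids" in inputs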
| 302
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    '''simple docstring'''

    def __init__(self):
        """simple docstring"""
        self.test()

    def test(self):
        """simple docstring"""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """simple docstring"""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """simple docstring"""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """simple docstring"""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """simple docstring"""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """simple docstring"""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """simple docstring"""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Any ,_snake_case : List[int] ) -> List[str]:
"""simple docstring"""
super(_snake_case ,self ).__init__()
if not isinstance(_snake_case ,_snake_case ) or len(_snake_case ) == 0:
raise ValueError(f"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_snake_case ,_snake_case ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
lowercase__ : Dict = token_ids
lowercase__ : str = len(self.token_ids )
lowercase__ : int = -1 # the index of the currently fulfilled step
lowercase__ : Dict = False
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase ( self : Dict ,_snake_case : int ) -> str:
"""simple docstring"""
if not isinstance(_snake_case ,_snake_case ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(_snake_case )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase ( self : Any ,_snake_case : int ) -> str:
"""simple docstring"""
if not isinstance(_snake_case ,_snake_case ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(_snake_case )}""" )
lowercase__ : Optional[int] = False
lowercase__ : int = False
lowercase__ : int = False
if self.does_advance(_snake_case ):
self.fulfilled_idx += 1
lowercase__ : str = True
if self.fulfilled_idx == (self.seqlen - 1):
lowercase__ : Optional[int] = True
lowercase__ : Tuple = completed
else:
# failed to make progress.
lowercase__ : Optional[Any] = True
self.reset()
return stepped, completed, reset
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
lowercase__ : str = False
lowercase__ : List[str] = 0
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCAmelCase ( self : str ,_snake_case : int=False ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : str = PhrasalConstraint(self.token_ids )
if stateful:
lowercase__ : Optional[int] = self.seqlen
lowercase__ : Tuple = self.fulfilled_idx
lowercase__ : int = self.completed
return new_constraint
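# A minimal, self-contained sketch of the stepping protocol implemented above
# (does_advance / update / reset), written with plain names since the class
# identifiers in this file are mangled. The token ids are purely illustrative.
def _phrasal_demo(phrase, stream):
    fulfilled_idx = -1  # index of the last matched token, as in the class above
    for token in stream:
        if token == phrase[fulfilled_idx + 1]:
            fulfilled_idx += 1  # stepped
            if fulfilled_idx == len(phrase) - 1:
                return True  # completed
        else:
            fulfilled_idx = -1  # reset: a mismatch throws away all progress
    return False

assert _phrasal_demo([5, 9, 3], [5, 9, 7, 5, 9, 3]) is True
assert _phrasal_demo([5, 9, 3], [5, 9, 7]) is False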
class __A :
'''simple docstring'''
def __init__( self : List[Any] ,_snake_case : List[List[int]] ,_snake_case : Optional[Any]=True ) -> Any:
"""simple docstring"""
lowercase__ : Any = max([len(one ) for one in nested_token_ids] )
lowercase__ : Tuple = {}
for token_ids in nested_token_ids:
lowercase__ : int = root
for tidx, token_id in enumerate(_snake_case ):
if token_id not in level:
lowercase__ : Union[str, Any] = {}
lowercase__ : List[str] = level[token_id]
if no_subsets and self.has_subsets(_snake_case ,_snake_case ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f""" {nested_token_ids}.""" )
lowercase__ : Tuple = root
def UpperCAmelCase ( self : List[Any] ,_snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Any = self.trie
for current_token in current_seq:
lowercase__ : Optional[Any] = start[current_token]
lowercase__ : Union[str, Any] = list(start.keys() )
return next_tokens
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[int] = self.next_tokens(_snake_case )
return len(_snake_case ) == 0
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Any = list(root.values() )
if len(_snake_case ) == 0:
return 1
else:
return sum([self.count_leaves(_snake_case ) for nn in next_nodes] )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : int ,_snake_case : int ) -> str:
"""simple docstring"""
lowercase__ : Optional[int] = self.count_leaves(_snake_case )
return len(_snake_case ) != leaf_count
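# Self-contained sketch of the trie built above: each nested token list becomes
# a root-to-leaf path, so the children of the node reached by a prefix are
# exactly the tokens that may come next. Values are illustrative.
def _build_trie(nested_token_ids):
    root = {}
    for token_ids in nested_token_ids:
        level = root
        for token_id in token_ids:
            level = level.setdefault(token_id, {})
    return root

_demo_trie = _build_trie([[1, 2, 3], [1, 2, 4], [1, 5]])
assert sorted(_demo_trie[1][2]) == [3, 4]  # next_tokens after the prefix [1, 2]
assert _demo_trie[1][5] == {}              # [1, 5] reaches a leaf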
class __A ( A_ ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : List[List[int]] ) -> int:
"""simple docstring"""
super(_snake_case ,self ).__init__()
if not isinstance(_snake_case ,_snake_case ) or len(_snake_case ) == 0:
raise ValueError(f"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_snake_case ,_snake_case ) for token_ids in nested_token_ids ):
raise ValueError(f"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_snake_case ,_snake_case ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
lowercase__ : Dict = DisjunctiveTrie(_snake_case )
lowercase__ : Any = nested_token_ids
lowercase__ : Union[str, Any] = self.trie.max_height
lowercase__ : Union[str, Any] = []
lowercase__ : Dict = False
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Any = self.trie.next_tokens(self.current_seq )
if len(_snake_case ) == 0:
return None
else:
return token_list
def UpperCAmelCase ( self : int ,_snake_case : int ) -> List[str]:
"""simple docstring"""
if not isinstance(_snake_case ,_snake_case ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_snake_case )}""" )
lowercase__ : Any = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCAmelCase ( self : Dict ,_snake_case : int ) -> Optional[int]:
"""simple docstring"""
if not isinstance(_snake_case ,_snake_case ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_snake_case )}""" )
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : Tuple = False
if self.does_advance(_snake_case ):
self.current_seq.append(_snake_case )
lowercase__ : Optional[int] = True
else:
lowercase__ : List[Any] = True
self.reset()
lowercase__ : Optional[Any] = self.trie.reached_leaf(self.current_seq )
lowercase__ : List[str] = completed
return stepped, completed, reset
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : int = False
lowercase__ : List[str] = []
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict=False ) -> Tuple:
"""simple docstring"""
lowercase__ : List[str] = DisjunctiveConstraint(self.token_ids )
if stateful:
lowercase__ : Dict = self.seqlen
lowercase__ : Any = self.current_seq
lowercase__ : Tuple = self.completed
return new_constraint
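# Quick check of the completion rule above: a disjunctive constraint completes
# exactly when the current sequence reaches a trie leaf (a node with no
# children). The literal dict encodes the nested ids [[1, 2, 3], [1, 5]].
_disjunctive_trie = {1: {2: {3: {}}, 5: {}}}

def _reached_leaf(trie, seq):
    node = trie
    for token in seq:
        node = node[token]
    return len(node) == 0

assert _reached_leaf(_disjunctive_trie, [1, 5]) is True
assert _reached_leaf(_disjunctive_trie, [1, 2]) is False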
class __A :
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : List[Constraint] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = constraints
# max # of steps required to fulfill a given constraint
lowercase__ : Tuple = max([c.seqlen for c in constraints] )
lowercase__ : Union[str, Any] = len(_snake_case )
lowercase__ : Any = False
self.init_state()
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
lowercase__ : Tuple = []
lowercase__ : List[str] = None
lowercase__ : Tuple = [constraint.copy(stateful=_snake_case ) for constraint in self.constraints]
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ : Any = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[str] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowercase__ : str = constraint.advance()
if isinstance(_snake_case ,_snake_case ):
token_list.append(_snake_case )
elif isinstance(_snake_case ,_snake_case ):
token_list.extend(_snake_case )
else:
lowercase__ : Dict = self.inprogress_constraint.advance()
if isinstance(_snake_case ,_snake_case ):
token_list.append(_snake_case )
elif isinstance(_snake_case ,_snake_case ):
token_list.extend(_snake_case )
if len(_snake_case ) == 0:
return None
else:
return token_list
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[List[int]] ) -> List[str]:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowercase__ , lowercase__ : Union[str, Any] = self.add(_snake_case )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCAmelCase ( self : List[Any] ,_snake_case : int ) -> int:
"""simple docstring"""
if not isinstance(_snake_case ,_snake_case ):
raise ValueError(f"""`token_id` should be an `int`, but is `{token_id}`.""" )
lowercase__ , lowercase__ : Dict = False, False
if self.completed:
lowercase__ : List[Any] = True
lowercase__ : int = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state.
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = self.inprogress_constraint.update(_snake_case )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_snake_case ) )
lowercase__ : Tuple = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowercase__ : Any = None
if len(self.pending_constraints ) == 0:
# we're done!
lowercase__ : Tuple = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward any
# constraint in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_snake_case ):
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = pending_constraint.update(_snake_case )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_snake_case )
lowercase__ : List[Any] = None
if not complete and stepped:
lowercase__ : Any = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowercase__ : List[str] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowercase__ : Dict = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCAmelCase ( self : Dict ,_snake_case : Any=True ) -> int:
"""simple docstring"""
lowercase__ : Tuple = ConstraintListState(self.constraints ) # we never actually mutate the self.constraints objects
# throughout this process, so they are still in their initialization state.
if stateful:
lowercase__ : Any = [
constraint.copy(stateful=_snake_case ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowercase__ : Union[str, Any] = self.inprogress_constraint.copy(stateful=_snake_case )
lowercase__ : str = [constraint.copy() for constraint in self.pending_constraints]
return new_state
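# Self-contained sketch of the scoring method above (named get_bank in the
# upstream source): each completed constraint is worth max_seqlen points, and
# an in-progress constraint earns the steps it has already fulfilled.
def _bank(n_complete, max_seqlen, inprogress_remaining=None):
    add = 0 if inprogress_remaining is None else max_seqlen - inprogress_remaining
    return n_complete * max_seqlen + add

assert _bank(2, 5) == 10                           # two finished constraints
assert _bank(2, 5, inprogress_remaining=3) == 12   # plus 2 steps of progress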
| 302
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
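# The _LazyModule above defers the heavy framework imports until an attribute
# is first accessed. A minimal sketch of the same idea (the mapping below is
# illustrative, not the real import structure):
import importlib

class _LazySketch:
    def __init__(self, attr_to_module):
        self._attr_to_module = attr_to_module  # e.g. {"RobertaModel": "...modeling_roberta"}

    def __getattr__(self, name):
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)  # the submodule is imported only on first access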
| 302
| 1
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCAmelCase_ = ''
lowerCAmelCase_ = ''
lowerCAmelCase_ = ''
lowerCAmelCase_ = 1 # (0 is vertical, 1 is horizontal)
def __UpperCAmelCase ( ) -> None:
lowercase__ , lowercase__ : List[Any] = get_dataset(__lowerCamelCase , __lowerCamelCase )
print('''Processing...''' )
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = update_image_and_anno(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for index, image in enumerate(__lowerCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowercase__ : str = random_chars(32 )
lowercase__ : int = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
lowercase__ : Dict = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(__lowerCamelCase )} with {file_name}""" )
lowercase__ : Optional[int] = []
for anno in new_annos[index]:
lowercase__ : Tuple = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(__lowerCamelCase )
with open(f"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]:
lowercase__ : Dict = []
lowercase__ : Tuple = []
for label_file in glob.glob(os.path.join(__lowerCamelCase , '''*.txt''' ) ):
lowercase__ : str = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(__lowerCamelCase ) as in_file:
lowercase__ : Any = in_file.readlines()
lowercase__ : str = os.path.join(__lowerCamelCase , f"""{label_name}.jpg""" )
lowercase__ : Optional[Any] = []
for obj_list in obj_lists:
lowercase__ : Dict = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__lowerCamelCase )
labels.append(__lowerCamelCase )
return img_paths, labels
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 1 ) -> tuple[list, list, list]:
lowercase__ : Union[str, Any] = []
lowercase__ : Union[str, Any] = []
lowercase__ : Optional[Any] = []
for idx in range(len(__lowerCamelCase ) ):
lowercase__ : Any = []
lowercase__ : Tuple = img_list[idx]
path_list.append(__lowerCamelCase )
lowercase__ : List[str] = anno_list[idx]
lowercase__ : Tuple = cva.imread(__lowerCamelCase )
if flip_type == 1:
lowercase__ : Optional[int] = cva.flip(__lowerCamelCase , __lowerCamelCase )
for bbox in img_annos:
lowercase__ : List[str] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
lowercase__ : Optional[int] = cva.flip(__lowerCamelCase , __lowerCamelCase )
for bbox in img_annos:
lowercase__ : str = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__lowerCamelCase )
new_imgs_list.append(__lowerCamelCase )
return new_imgs_list, new_annos_lists, path_list
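# Numeric check of the flip rule above: YOLO-format annotations store centers
# normalized to [0, 1], so a horizontal flip maps x_center to 1 - x_center and
# a vertical flip maps y_center to 1 - y_center. Illustrative values:
_bbox = [0, 0.25, 0.40, 0.10, 0.20]  # [class, x_center, y_center, width, height]
assert [_bbox[0], 1 - _bbox[1], _bbox[2], _bbox[3], _bbox[4]] == [0, 0.75, 0.40, 0.10, 0.20]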
def __UpperCAmelCase ( __lowerCamelCase = 32 ) -> str:
assert number_char > 1, "The number of characters should be greater than 1"
lowercase__ : Tuple = ascii_lowercase + digits
return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 302
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default="NER" ,metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase : bool = field(default=A_ ,metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,)
lowerCAmelCase : int = field(
default=1_2_8 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
lowercase__ : str = import_module('''tasks''' )
try:
lowercase__ : List[str] = getattr(__lowerCamelCase , model_args.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Prepare the CoNLL-2003 task
lowercase__ : Union[str, Any] = token_classification_task.get_labels(data_args.labels )
lowercase__ : Dict[int, str] = dict(enumerate(__lowerCamelCase ) )
lowercase__ : Optional[int] = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid={label: i for i, label in enumerate(__lowerCamelCase )} , cache_dir=model_args.cache_dir , )
lowercase__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowercase__ : str = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__lowerCamelCase , __lowerCamelCase ) -> Tuple[List[int], List[int]]:
lowercase__ : Tuple = np.argmax(__lowerCamelCase , axis=2 )
lowercase__ , lowercase__ : Tuple = preds.shape
lowercase__ : List[str] = [[] for _ in range(__lowerCamelCase )]
lowercase__ : Tuple = [[] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__lowerCamelCase ) -> Dict:
lowercase__ , lowercase__ : List[Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__lowerCamelCase , __lowerCamelCase ),
"precision": precision_score(__lowerCamelCase , __lowerCamelCase ),
"recall": recall_score(__lowerCamelCase , __lowerCamelCase ),
"f1": fa_score(__lowerCamelCase , __lowerCamelCase ),
}
# Data collator
lowercase__ : Tuple = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowercase__ : str = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , compute_metrics=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ : int = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ : Optional[int] = trainer.evaluate()
lowercase__ : Union[str, Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__lowerCamelCase )
# Predict
if training_args.do_predict:
lowercase__ : Optional[int] = TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = trainer.predict(__lowerCamelCase )
lowercase__ , lowercase__ : Tuple = align_predictions(__lowerCamelCase , __lowerCamelCase )
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return results
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
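# Toy check of the align_predictions logic above: positions whose label id
# equals the CrossEntropyLoss ignore index (-100) are dropped before scoring.
# Standalone illustration with made-up logits:
import numpy as np

_label_map = {0: "O", 1: "B-PER"}
_preds = np.array([[[0.9, 0.1], [0.2, 0.8], [0.9, 0.1]]])  # (batch, seq_len, num_labels)
_label_ids = np.array([[0, 1, -100]])
_pred_ids = np.argmax(_preds, axis=2)
_kept = [_label_map[int(p)] for p, l in zip(_pred_ids[0], _label_ids[0]) if l != -100]
assert _kept == ["O", "B-PER"]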
| 302
| 1
|
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> list[str]:
if nth_term == "":
return [""]
lowercase__ : Tuple = int(__lowerCamelCase )
lowercase__ : int = int(__lowerCamelCase )
lowercase__ : list[str] = []
for temp in range(int(__lowerCamelCase ) ):
series.append(f"""1 / {pow(temp + 1 , int(__lowerCamelCase ) )}""" if series else '''1''' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = int(input('Enter the last number (nth term) of the P-Series'))
lowerCAmelCase_ = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
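# Worked example of the formula above: for power p = 2 the first five terms of
# the P-series are 1, 1/2^2, 1/3^2, 1/4^2, 1/5^2, rendered as strings:
_terms = ["1"] + [f"1 / {pow(n, 2)}" for n in range(2, 6)]
assert _terms == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]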
| 302
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[int]:
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : Dict = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : Any = 2
# Initialize accelerator
lowercase__ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : List[Any] = config['''lr''']
lowercase__ : Union[str, Any] = int(config['''num_epochs'''] )
lowercase__ : List[str] = int(config['''seed'''] )
lowercase__ : Any = int(config['''batch_size'''] )
lowercase__ : int = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : str = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : Optional[int] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[str] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : int = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Dict = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Any = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : Union[str, Any] = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
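# Self-contained sketch of what @find_executable_batch_size does: run the
# wrapped training function, and on an out-of-memory error halve the batch
# size and retry. This mimics the contract for illustration only; the real
# implementation lives in accelerate.utils.
def _find_executable_batch_size_sketch(training_fn, starting_batch_size):
    batch_size = starting_batch_size
    while batch_size > 0:
        try:
            return training_fn(batch_size)
        except RuntimeError as err:
            if "out of memory" not in str(err):
                raise  # only OOM-style errors trigger a retry
            batch_size //= 2
    raise RuntimeError("No executable batch size found.")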
| 302
| 1
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __A ( A_ ,A_ ):
'''simple docstring'''
@register_to_config
def __init__( self : int ,_snake_case : int = 128 ,_snake_case : int = 256 ,_snake_case : float = 2000.0 ,_snake_case : int = 768 ,_snake_case : int = 12 ,_snake_case : int = 12 ,_snake_case : int = 64 ,_snake_case : int = 2_048 ,_snake_case : float = 0.1 ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
lowercase__ : str = nn.Sequential(
nn.Linear(_snake_case ,d_model * 4 ,bias=_snake_case ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_snake_case ) ,nn.SiLU() ,)
lowercase__ : int = nn.Embedding(_snake_case ,_snake_case )
lowercase__ : Optional[int] = False
lowercase__ : List[str] = nn.Linear(_snake_case ,_snake_case ,bias=_snake_case )
lowercase__ : Dict = nn.Dropout(p=_snake_case )
lowercase__ : Union[str, Any] = nn.ModuleList()
for lyr_num in range(_snake_case ):
# FiLM conditional T5 decoder
lowercase__ : int = DecoderLayer(d_model=_snake_case ,d_kv=_snake_case ,num_heads=_snake_case ,d_ff=_snake_case ,dropout_rate=_snake_case )
self.decoders.append(_snake_case )
lowercase__ : Optional[int] = TaLayerNorm(_snake_case )
lowercase__ : int = nn.Dropout(p=_snake_case )
lowercase__ : List[Any] = nn.Linear(_snake_case ,_snake_case ,bias=_snake_case )
def UpperCAmelCase ( self : str ,_snake_case : List[Any] ,_snake_case : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCAmelCase ( self : List[str] ,_snake_case : Union[str, Any] ,_snake_case : int ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowercase__ : str = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
lowercase__ : int = self.conditioning_emb(_snake_case ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowercase__ : List[str] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowercase__ : List[Any] = torch.broadcast_to(
torch.arange(_snake_case ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
lowercase__ : Union[str, Any] = self.position_encoding(_snake_case )
lowercase__ : Optional[Any] = self.continuous_inputs_projection(_snake_case )
inputs += position_encodings
lowercase__ : List[Any] = self.dropout(_snake_case )
# decoder: No padding present.
lowercase__ : Tuple = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowercase__ : Tuple = [(x, self.encoder_decoder_mask(_snake_case ,_snake_case )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowercase__ : Optional[int] = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
lowercase__ : str = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
lowercase__ : int = lyr(
_snake_case ,conditioning_emb=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,)[0]
lowercase__ : Dict = self.decoder_norm(_snake_case )
lowercase__ : Union[str, Any] = self.post_dropout(_snake_case )
lowercase__ : int = self.spec_out(_snake_case )
return spec_out
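# Shape check for the encoder_decoder_mask helper above: the outer product of a
# (batch, tgt_len) query mask and a (batch, src_len) key mask, unsqueezed once
# more, yields (batch, 1, tgt_len, src_len), which broadcasts over attention
# heads. Toy tensors only:
_q_mask = torch.ones(2, 4)
_k_mask = torch.ones(2, 7)
_edm = torch.mul(_q_mask.unsqueeze(-1), _k_mask.unsqueeze(-2)).unsqueeze(-3)
assert _edm.shape == (2, 1, 4, 7)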
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : str ,_snake_case : Optional[int] ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int=1e-6 ) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Union[str, Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_snake_case ,d_kv=_snake_case ,num_heads=_snake_case ,dropout_rate=_snake_case ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_snake_case ,d_kv=_snake_case ,num_heads=_snake_case ,dropout_rate=_snake_case ,layer_norm_epsilon=_snake_case ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_snake_case ,d_ff=_snake_case ,dropout_rate=_snake_case ,layer_norm_epsilon=_snake_case ) )
def UpperCAmelCase ( self : int ,_snake_case : int ,_snake_case : Optional[Any]=None ,_snake_case : str=None ,_snake_case : List[str]=None ,_snake_case : Dict=None ,_snake_case : List[str]=None ,) -> List[Any]:
"""simple docstring"""
lowercase__ : List[Any] = self.layer[0](
_snake_case ,conditioning_emb=_snake_case ,attention_mask=_snake_case ,)
if encoder_hidden_states is not None:
lowercase__ : Union[str, Any] = torch.where(encoder_attention_mask > 0 ,0 ,-1e10 ).to(
encoder_hidden_states.dtype )
lowercase__ : int = self.layer[1](
_snake_case ,key_value_states=_snake_case ,attention_mask=_snake_case ,)
# Apply Film Conditional Feed Forward layer
lowercase__ : List[Any] = self.layer[-1](_snake_case ,_snake_case )
return (hidden_states,)
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : Any ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
lowercase__ : List[str] = TaLayerNorm(_snake_case )
lowercase__ : str = TaFiLMLayer(in_features=d_model * 4 ,out_features=_snake_case )
lowercase__ : str = Attention(query_dim=_snake_case ,heads=_snake_case ,dim_head=_snake_case ,out_bias=_snake_case ,scale_qk=_snake_case )
lowercase__ : Optional[Any] = nn.Dropout(_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : List[Any] ,_snake_case : Optional[Any]=None ,_snake_case : Union[str, Any]=None ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : int = self.layer_norm(_snake_case )
if conditioning_emb is not None:
lowercase__ : int = self.FiLMLayer(_snake_case ,_snake_case )
# Self-attention block
lowercase__ : Union[str, Any] = self.attention(_snake_case )
lowercase__ : List[Any] = hidden_states + self.dropout(_snake_case )
return hidden_states
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Optional[int] ,_snake_case : List[Any] ,_snake_case : List[Any] ,_snake_case : Tuple ,_snake_case : str ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[Any] = Attention(query_dim=_snake_case ,heads=_snake_case ,dim_head=_snake_case ,out_bias=_snake_case ,scale_qk=_snake_case )
lowercase__ : List[Any] = TaLayerNorm(_snake_case ,eps=_snake_case )
lowercase__ : List[str] = nn.Dropout(_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : str ,_snake_case : str=None ,_snake_case : Optional[Any]=None ,) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.layer_norm(_snake_case )
lowercase__ : List[Any] = self.attention(
_snake_case ,encoder_hidden_states=_snake_case ,attention_mask=attention_mask.squeeze(1 ) ,)
lowercase__ : Union[str, Any] = hidden_states + self.dropout(_snake_case )
return layer_output
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Tuple ,_snake_case : str ,_snake_case : Optional[Any] ,_snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
super().__init__()
lowercase__ : List[str] = TaDenseGatedActDense(d_model=_snake_case ,d_ff=_snake_case ,dropout_rate=_snake_case )
lowercase__ : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_snake_case )
lowercase__ : Tuple = TaLayerNorm(_snake_case ,eps=_snake_case )
lowercase__ : str = nn.Dropout(_snake_case )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[Any] ,_snake_case : List[str]=None ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = self.layer_norm(_snake_case )
if conditioning_emb is not None:
lowercase__ : str = self.film(_snake_case ,_snake_case )
lowercase__ : Dict = self.DenseReluDense(_snake_case )
lowercase__ : str = hidden_states + self.dropout(_snake_case )
return hidden_states
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,_snake_case : Any ,_snake_case : str ,_snake_case : Optional[Any] ) -> int:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[int] = nn.Linear(_snake_case ,_snake_case ,bias=_snake_case )
lowercase__ : Dict = nn.Linear(_snake_case ,_snake_case ,bias=_snake_case )
lowercase__ : Tuple = nn.Linear(_snake_case ,_snake_case ,bias=_snake_case )
lowercase__ : Dict = nn.Dropout(_snake_case )
lowercase__ : Optional[Any] = NewGELUActivation()
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.act(self.wi_a(_snake_case ) )
lowercase__ : Any = self.wi_a(_snake_case )
lowercase__ : str = hidden_gelu * hidden_linear
lowercase__ : Any = self.dropout(_snake_case )
lowercase__ : int = self.wo(_snake_case )
return hidden_states
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,_snake_case : Any ,_snake_case : Union[str, Any]=1e-6 ) -> List[str]:
"""simple docstring"""
super().__init__()
lowercase__ : str = nn.Parameter(torch.ones(_snake_case ) )
lowercase__ : List[Any] = eps
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Dict = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_snake_case )
lowercase__ : Union[str, Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowercase__ : Optional[Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __A ( nn.Module ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(_snake_case ,3.0 )) ))
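# The class above implements the tanh approximation of GELU; recent PyTorch
# versions ship the same approximation, so the two agree to float precision:
_x = torch.linspace(-3.0, 3.0, 7)
_gelu = 0.5 * _x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (_x + 0.044715 * torch.pow(_x, 3.0))))
assert torch.allclose(_gelu, torch.nn.functional.gelu(_x, approximate="tanh"), atol=1e-6)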
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Any ,_snake_case : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[int] = nn.Linear(_snake_case ,out_features * 2 ,bias=_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : Any ,_snake_case : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : str = self.scale_bias(_snake_case )
lowercase__ , lowercase__ : Optional[Any] = torch.chunk(_snake_case ,2 ,-1 )
lowercase__ : Union[str, Any] = x * (1 + scale) + shift
return x
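# FiLM in one line: the conditioning embedding is projected to 2 * out_features,
# chunked into (scale, shift), and applied as x * (1 + scale) + shift. Toy check:
_film_x = torch.ones(1, 3)
_scale = torch.full((1, 3), 0.5)
_shift = torch.full((1, 3), 2.0)
assert torch.equal(_film_x * (1 + _scale) + _shift, torch.full((1, 3), 3.5))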
| 302
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowercase__ : List[Any] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ : int = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
lowercase__ : str = tokenizer('''This is me''' ,return_tensors='''pt''' )
lowercase__ : Tuple = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase__ : Optional[int] = model.generate(**_snake_case )
lowercase__ : List[Any] = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
lowercase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase__ : int = model_reloaded.generate(**_snake_case )
self.assertTrue(torch.allclose(_snake_case ,_snake_case ) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[str] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
lowercase__ : Union[str, Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_snake_case ):
model.save_pretrained(_snake_case )
lowercase__ : int = model.reverse_bettertransformer()
model.save_pretrained(_snake_case )
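# The round trip exercised above, written as plain usage (requires `optimum`):
#
#     model = model.to_bettertransformer()       # swap in fused attention kernels
#     ... generate / fine-tune ...
#     model = model.reverse_bettertransformer()  # restore the vanilla modules
#     model.save_pretrained(save_dir)            # saving requires the reversed model
#
# The second test confirms that save_pretrained refuses to run while the
# BetterTransformer modules are still in place.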
| 302
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __UpperCAmelCase ( __lowerCamelCase ) -> list[tuple[int, int]]:
lowercase__ : Optional[int] = 0
lowercase__ : Optional[int] = len(__lowerCamelCase ) # No of vertices in graph
lowercase__ : Optional[int] = [0] * n
lowercase__ : Optional[int] = [False] * n
def dfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
lowercase__ : str = True
lowercase__ : List[str] = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , id_ )
lowercase__ : Tuple = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase__ : Any = min(low[at] , low[to] )
lowercase__ : list[tuple[int, int]] = []
for i in range(__lowerCamelCase ):
if not visited[i]:
dfs(__lowerCamelCase , -1 , __lowerCamelCase , id_ )
return bridges
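# Spot check of the definition used above: an edge is a bridge iff removing it
# disconnects the graph. In this toy graph only (0, 1) qualifies, because the
# vertices 1, 2, 3 form a cycle.
_toy_graph = {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]}

def _connected_without(u, v):
    seen, stack = {0}, [0]
    while stack:
        node = stack.pop()
        for nxt in _toy_graph[node]:
            if {node, nxt} == {u, v} or nxt in seen:
                continue
            seen.add(nxt)
            stack.append(nxt)
    return len(seen) == len(_toy_graph)

assert not _connected_without(0, 1)  # (0, 1) is a bridge
assert _connected_without(1, 2)      # (1, 2) lies on a cycle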
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302
|
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> Any:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
lowercase__ : List[str] = os.path.abspath(__lowerCamelCase )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
lowercase__ : List[Any] = torch.load(__lowerCamelCase , map_location='''cpu''' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
lowercase__ : int = convert_pytorch_state_dict_to_flax(__lowerCamelCase , __lowerCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowercase__ : Dict = convert_pytorch_sharded_state_dict_to_flax(__lowerCamelCase , __lowerCamelCase )
return flax_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(__lowerCamelCase ) -> bool:
return len(set(__lowerCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowercase__ : int = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowercase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowercase__ : Any = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowercase__ : Tuple = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase__ : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
lowercase__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
lowercase__ : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase__ : Optional[int] = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowercase__ : List[str] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowercase__ : List[str] = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowercase__ : List[str] = pt_tuple_key[-2] + '''_v'''
if name is not None:
lowercase__ : Optional[Any] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
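# Illustration of the linear-layer rule above: PyTorch stores nn.Linear weights
# as (out_features, in_features) under a "weight" key, while Flax expects a
# "kernel" key holding the transpose. Toy key and tensor only:
_pt_key = ("encoder", "dense", "weight")
_pt_tensor = np.zeros((4, 8))
_flax_key = _pt_key[:-1] + ("kernel",)  # -> ("encoder", "dense", "kernel")
assert _pt_tensor.T.shape == (8, 4)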
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
# convert pytorch tensor to numpy
lowercase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase__ : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowercase__ : str = flax_model.params['''params''']
else:
lowercase__ : Optional[int] = flax_model.params
lowercase__ : Optional[Any] = flatten_dict(__lowerCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowercase__ : Tuple = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__lowerCamelCase )
lowercase__ : int = {}
lowercase__ : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowercase__ : Union[str, Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ : Optional[Any] = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowercase__ : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase__ , lowercase__ : List[str] = rename_key_and_reshape_tensor(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# add model prefix if necessary
lowercase__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowercase__ : int = jnp.asarray(__lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCamelCase , __lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowercase__ : Tuple = jnp.asarray(__lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowercase__ : Any = jnp.asarray(__lowerCamelCase )
return unflatten_dict(__lowerCamelCase )
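# A minimal sketch of the flatten/unflatten round trip the converter relies on,
# using only flax.traverse_util (imported at the top of this file): nested
# parameter dicts are flattened to tuple keys, renamed/updated, then re-nested.
if __name__ == "__main__":
    _nested = {"encoder": {"dense": {"kernel": 0}}}
    _flat = flatten_dict(_nested)
    assert _flat == {("encoder", "dense", "kernel"): 0}
    assert unflatten_dict(_flat) == _nested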
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
import torch
# Load the index
lowercase__ : Dict = {}
for shard_file in shard_filenames:
        # load the shard with torch (these are PyTorch pickle files, not msgpack)
lowercase__ : Optional[int] = torch.load(__lowerCamelCase )
lowercase__ : str = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase__ : Dict = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowercase__ : Optional[Any] = flax_model.params['''params''']
lowercase__ : List[Any] = flatten_dict(__lowerCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
lowercase__ : Union[str, Any] = flax_model.params
lowercase__ : Tuple = flatten_dict(__lowerCamelCase )
lowercase__ : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowercase__ : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ : List[str] = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowercase__ : Tuple = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase__ , lowercase__ : str = rename_key_and_reshape_tensor(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# add model prefix if necessary
lowercase__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowercase__ : Union[str, Any] = jnp.asarray(__lowerCamelCase )
continue
if "var" in flax_key[-1]:
lowercase__ : str = jnp.asarray(__lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCamelCase , __lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowercase__ : List[str] = jnp.asarray(__lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowercase__ : Union[str, Any] = jnp.asarray(__lowerCamelCase )
return unflatten_dict(__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : List[str] = os.path.abspath(__lowerCamelCase )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
lowercase__ : Optional[int] = getattr(__lowerCamelCase , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__lowerCamelCase , '''rb''' ) as state_f:
try:
lowercase__ : str = from_bytes(__lowerCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    lowercase__ : Any = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , __lowerCamelCase ) ).values()
if any(__lowerCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
lowercase__ : Union[str, Any] = jax.tree_util.tree_map(
        lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __lowerCamelCase )
lowercase__ : Tuple = flatten_dict(__lowerCamelCase )
lowercase__ : List[str] = pt_model.state_dict()
lowercase__ : int = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
lowercase__ : int = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowercase__ : List[str] = []
lowercase__ : Tuple = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowercase__ : List[Any] = flax_key_tuple[0] == pt_model.base_model_prefix
lowercase__ : Optional[int] = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : Tuple = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCamelCase ) not in pt_model_dict:
# conv layer
lowercase__ : Dict = flax_key_tuple[:-1] + ('''weight''',)
lowercase__ : List[str] = jnp.transpose(__lowerCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ) not in pt_model_dict:
# linear layer
lowercase__ : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
lowercase__ : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowercase__ : Dict = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowercase__ : Any = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
lowercase__ : Dict = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
lowercase__ : Union[str, Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowercase__ : Dict = '''.'''.join(__lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowercase__ : Optional[int] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowercase__ : str = key.split('''.''' )
lowercase__ : Optional[Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowercase__ : List[str] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowercase__ : str = key_components[-2] + '''_v'''
if name is not None:
lowercase__ : Optional[int] = key_components[:-3] + [name]
lowercase__ : List[str] = '''.'''.join(__lowerCamelCase )
lowercase__ : List[Any] = key
if flax_key in special_pt_names:
lowercase__ : Any = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
lowercase__ : List[str] = np.asarray(__lowerCamelCase ) if not isinstance(__lowerCamelCase , np.ndarray ) else flax_tensor
lowercase__ : List[str] = torch.from_numpy(__lowerCamelCase )
# remove from missing keys
missing_keys.remove(__lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowerCamelCase )
pt_model.load_state_dict(__lowerCamelCase )
# re-transform missing_keys to list
lowercase__ : Optional[Any] = list(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(__lowerCamelCase ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
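# A hedged, pure-Python check of the `weight_norm` key rewrite handled above:
# keys ending in ("parametrizations", <name>, "original0"/"original1") collapse
# to "<name>_g" / "<name>_v".
if __name__ == "__main__":
    _key_components = "conv.parametrizations.weight.original0".split(".")
    assert _key_components[-3::2] == ["parametrizations", "original0"]
    assert _key_components[:-3] + [_key_components[-2] + "_g"] == ["conv", "weight_g"]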
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __UpperCAmelCase ( __lowerCamelCase ) -> list[str]:
lowercase__ : List[str] = []
lowercase__ : int = 11
lowercase__ : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(__lowerCamelCase , __lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__lowerCamelCase , __lowerCamelCase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
lowercase__ : Optional[Any] = 10
return solutions
def __UpperCAmelCase ( __lowerCamelCase = 2 ) -> int:
lowercase__ : Optional[Any] = 1.0
for fraction in fraction_list(__lowerCamelCase ):
lowercase__ : Union[str, Any] = Fraction(__lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(__lowerCamelCase )
if __name__ == "__main__":
print(solution())
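    # A hedged worked example: 49/98 is the classic digit-cancelling fraction,
    # since naively "cancelling" the shared digit 9 gives 4/8 == 49/98.
    assert is_digit_cancelling(49, 98)
    assert "49/98" in fraction_list(2)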
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Any ,_snake_case : UNetaDModel ,_snake_case : UNetaDModel ,_snake_case : DDPMScheduler ,_snake_case : Any ,) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[int] = value_function
lowercase__ : Optional[int] = unet
lowercase__ : Tuple = scheduler
lowercase__ : Dict = env
lowercase__ : int = env.get_dataset()
lowercase__ : Dict = {}
for key in self.data.keys():
try:
lowercase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ : List[Any] = {}
for key in self.data.keys():
try:
lowercase__ : str = self.data[key].std()
except: # noqa: E722
pass
lowercase__ : Tuple = env.observation_space.shape[0]
lowercase__ : Optional[int] = env.action_space.shape[0]
def UpperCAmelCase ( self : str ,_snake_case : Any ,_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def UpperCAmelCase ( self : Dict ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
if type(_snake_case ) is dict:
return {k: self.to_torch(_snake_case ) for k, v in x_in.items()}
elif torch.is_tensor(_snake_case ):
return x_in.to(self.unet.device )
return torch.tensor(_snake_case ,device=self.unet.device )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Any ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
for key, val in cond.items():
lowercase__ : List[Any] = val.clone()
return x_in
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ,_snake_case : List[Any] ,_snake_case : int ,_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Any = x.shape[0]
lowercase__ : Dict = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ : Dict = torch.full((batch_size,) ,_snake_case ,device=self.unet.device ,dtype=torch.long )
for _ in range(_snake_case ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ : int = self.value_function(x.permute(0 ,2 ,1 ) ,_snake_case ).sample
lowercase__ : Optional[Any] = torch.autograd.grad([y.sum()] ,[x] )[0]
lowercase__ : List[str] = self.scheduler._get_variance(_snake_case )
lowercase__ : Union[str, Any] = torch.exp(0.5 * posterior_variance )
lowercase__ : Optional[int] = model_std * grad
lowercase__ : Optional[Any] = 0
lowercase__ : str = x.detach()
lowercase__ : Dict = x + scale * grad
lowercase__ : str = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : Union[str, Any] = self.unet(x.permute(0 ,2 ,1 ) ,_snake_case ).sample.permute(0 ,2 ,1 )
# TODO: verify deprecation of this kwarg
lowercase__ : Dict = self.scheduler.step(_snake_case ,_snake_case ,_snake_case ,predict_epsilon=_snake_case )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
lowercase__ : Dict = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : Union[str, Any] = self.to_torch(_snake_case )
return x, y
def __call__( self : Union[str, Any] ,_snake_case : Any ,_snake_case : Tuple=64 ,_snake_case : Any=32 ,_snake_case : Optional[Any]=2 ,_snake_case : str=0.1 ) -> List[Any]:
"""simple docstring"""
lowercase__ : Any = self.normalize(_snake_case ,'''observations''' )
lowercase__ : Tuple = obs[None].repeat(_snake_case ,axis=0 )
lowercase__ : Dict = {0: self.to_torch(_snake_case )}
lowercase__ : int = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ : Optional[int] = randn_tensor(_snake_case ,device=self.unet.device )
lowercase__ : Tuple = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : str = self.to_torch(_snake_case )
# run the diffusion process
lowercase__ , lowercase__ : int = self.run_diffusion(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# sort output trajectories by value
lowercase__ : Optional[Any] = y.argsort(0 ,descending=_snake_case ).squeeze()
lowercase__ : str = x[sorted_idx]
lowercase__ : str = sorted_values[:, :, : self.action_dim]
lowercase__ : Optional[int] = actions.detach().cpu().numpy()
lowercase__ : List[str] = self.de_normalize(_snake_case ,key='''actions''' )
# select the action with the highest value
if y is not None:
lowercase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ : str = np.random.randint(0 ,_snake_case )
lowercase__ : int = denorm_actions[selected_index, 0]
return denorm_actions
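# A hedged sanity check of the normalize/de_normalize pair above, which apply
# (x - mean) / std and its inverse (values here are made up):
if __name__ == "__main__":
    _mean, _std, _x = 2.0, 4.0, 10.0
    assert ((_x - _mean) / _std) * _std + _mean == _x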
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase_ = False
class __A ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : List[str] = '''A painting of a squirrel eating a burger '''
lowercase__ : str = torch.manual_seed(0 )
lowercase__ : List[str] = pipe(
prompt=_snake_case ,generator=_snake_case ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_snake_case )
lowercase__ : Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Optional[int] = generator.manual_seed(0 )
lowercase__ : List[Any] = pipe(
prompt=_snake_case ,generator=_snake_case ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' ,torch_dtype=torch.floataa )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : List[Any] = '''A painting of a squirrel eating a burger '''
lowercase__ : str = torch.manual_seed(0 )
lowercase__ : Optional[Any] = pipe(
prompt=_snake_case ,generator=_snake_case ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='''numpy''' ).images
lowercase__ : Any = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : str = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
lowerCAmelCase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
lowerCAmelCase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/krishnap25/mauve''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/krishnap25/mauve'''] ,reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[Any] ,_snake_case : Any ,_snake_case : List[str]=None ,_snake_case : Tuple=None ,_snake_case : List[Any]=None ,_snake_case : Any=None ,_snake_case : Optional[int]="auto" ,_snake_case : Optional[int]=-1 ,_snake_case : Optional[int]=0.9 ,_snake_case : Any=5 ,_snake_case : Dict=500 ,_snake_case : Optional[int]="gpt2-large" ,_snake_case : Optional[Any]=-1 ,_snake_case : Tuple=1_024 ,_snake_case : Optional[int]=25 ,_snake_case : Dict=5 ,_snake_case : int=True ,_snake_case : Union[str, Any]=25 ,) -> Any:
"""simple docstring"""
lowercase__ : Any = compute_mauve(
p_text=_snake_case ,q_text=_snake_case ,p_features=_snake_case ,q_features=_snake_case ,p_tokens=_snake_case ,q_tokens=_snake_case ,num_buckets=_snake_case ,pca_max_data=_snake_case ,kmeans_explained_var=_snake_case ,kmeans_num_redo=_snake_case ,kmeans_max_iter=_snake_case ,featurize_model_name=_snake_case ,device_id=_snake_case ,max_text_length=_snake_case ,divergence_curve_discretization_size=_snake_case ,mauve_scaling_factor=_snake_case ,verbose=_snake_case ,seed=_snake_case ,)
return out
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> None:
lowercase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""{len(__lowerCamelCase )} != {len(__lowerCamelCase )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
lowerCAmelCase_ = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
lowerCAmelCase_ = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
try:
lowercase__ : List[Any] = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
f""" {n_student}""" )
return list(range(__lowerCamelCase ) )
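# A hedged example of the copy maps above, assuming the upstream signature
# pick_layers_to_copy(n_student, n_teacher): distilling a 12-layer teacher into
# a 3-layer student copies teacher layers 0, 6 and 11 (roughly first, middle
# and last), per LAYERS_TO_COPY.
if __name__ == "__main__":
    assert pick_layers_to_copy(3, 12) == [0, 6, 11]
    assert pick_layers_to_copy(12, 12) == list(range(12))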
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[int]:
if n_student > n_teacher:
raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(__lowerCamelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = "student" , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=False , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
lowercase__ : Optional[Any] = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(__lowerCamelCase , __lowerCamelCase ):
AutoTokenizer.from_pretrained(__lowerCamelCase ).save_pretrained(__lowerCamelCase ) # purely for convenience
lowercase__ : Dict = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ).eval()
else:
assert isinstance(__lowerCamelCase , __lowerCamelCase ), f"""teacher must be a model or string got type {type(__lowerCamelCase )}"""
lowercase__ : Optional[int] = teacher.config.to_diff_dict()
try:
lowercase__ , lowercase__ : Optional[int] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
lowercase__ : str = teacher_e
if d is None:
lowercase__ : int = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
lowercase__ , lowercase__ : Any = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
lowercase__ , lowercase__ : Tuple = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
lowercase__ : int = teacher_e
if d is None:
lowercase__ : Dict = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__lowerCamelCase )
# Copy weights
lowercase__ : int = teacher.config_class(**__lowerCamelCase )
lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_config(__lowerCamelCase )
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
lowercase__ : int = student.load_state_dict(teacher.state_dict() , strict=__lowerCamelCase )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
lowercase__ , lowercase__ : Optional[int] = list(range(__lowerCamelCase ) ), list(range(__lowerCamelCase ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(__lowerCamelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
lowercase__ : List[int] = pick_layers_to_copy(__lowerCamelCase , __lowerCamelCase )
if d_layers_to_copy is None:
lowercase__ : List[int] = pick_layers_to_copy(__lowerCamelCase , __lowerCamelCase )
try:
if hasattr(
__lowerCamelCase , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowerCamelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowerCamelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowerCamelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowerCamelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __lowerCamelCase )
copy_layers(teacher.decoder.block , student.decoder.block , __lowerCamelCase )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
lowercase__ : Optional[int] = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(__lowerCamelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
"""simple docstring"""
import math
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
lowercase__ : Tuple = 0
lowercase__ : Tuple = 0
while num > 0:
lowercase__ : int = num % 8
        lowercase__ : Tuple = octal + (remainder * math.floor(math.pow(10 , counter ) ))
counter += 1
lowercase__ : Optional[Any] = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f"""0o{int(__lowerCamelCase )}"""
def __UpperCAmelCase ( ) -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
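    # A hedged arithmetic walk-through for num = 65: remainders are 1 (counter 0),
    # 0 (counter 1), 1 (counter 2), so octal accumulates 1*1 + 0*10 + 1*100 = 101.
    assert decimal_to_octal(65) == "0o101"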
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase_ = 'src/transformers'
# Matches is_xxx_available()
lowerCAmelCase_ = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase_ = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase_ = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
lowerCAmelCase_ = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase_ = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase_ = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase_ = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase_ = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase_ = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
lowerCAmelCase_ = re.compile(R'^\s*try:')
# Catches a line with else:
lowerCAmelCase_ = re.compile(R'^\s*else:')
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
if _re_test_backend.search(__lowerCamelCase ) is None:
return None
lowercase__ : Dict = [b[0] for b in _re_backend.findall(__lowerCamelCase )]
backends.sort()
return "_and_".join(__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase__ : str = f.readlines()
lowercase__ : Optional[int] = 0
while line_index < len(__lowerCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__lowerCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase__ : Any = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowercase__ : Union[str, Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__lowerCamelCase ):
lowercase__ : List[Any] = _re_one_line_import_struct.search(__lowerCamelCase ).groups()[0]
lowercase__ : List[Any] = re.findall('''\[([^\]]+)\]''' , __lowerCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowercase__ : List[str] = _re_import_struct_key_value.search(__lowerCamelCase )
if single_line_import_search is not None:
lowercase__ : Union[str, Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__lowerCamelCase ) > 0]
objects.extend(__lowerCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowercase__ : Dict = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase__ : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__ : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__ : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowercase__ : Optional[int] = lines[line_index]
if _re_import_struct_add_one.search(__lowerCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__lowerCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__lowerCamelCase ) is not None:
lowercase__ : Dict = _re_import_struct_add_many.search(__lowerCamelCase ).groups()[0].split(''', ''' )
lowercase__ : Tuple = [obj[1:-1] for obj in imports if len(__lowerCamelCase ) > 0]
objects.extend(__lowerCamelCase )
elif _re_between_brackets.search(__lowerCamelCase ) is not None:
lowercase__ : int = _re_between_brackets.search(__lowerCamelCase ).groups()[0].split(''', ''' )
lowercase__ : List[Any] = [obj[1:-1] for obj in imports if len(__lowerCamelCase ) > 0]
objects.extend(__lowerCamelCase )
elif _re_quote_object.search(__lowerCamelCase ) is not None:
objects.append(_re_quote_object.search(__lowerCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowercase__ : Union[str, Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase__ : Union[str, Any] = []
while (
line_index < len(__lowerCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowercase__ : int = lines[line_index]
lowercase__ : Union[str, Any] = _re_import.search(__lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase__ : Tuple = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(__lowerCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase__ : Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__ : str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__ : Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowercase__ : Any = lines[line_index]
lowercase__ : List[str] = _re_import.search(__lowerCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase__ : str = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int:
def find_duplicates(__lowerCamelCase ):
return [k for k, v in collections.Counter(__lowerCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase__ : Tuple = []
for key in import_dict_objects.keys():
lowercase__ : Tuple = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
lowercase__ : str = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase__ : str = '''base imports''' if key == '''none''' else f"""{key} backend"""
errors.append(f"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __UpperCAmelCase ( ) -> Any:
lowercase__ : Optional[Any] = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
if "__init__.py" in files:
            lowercase__ : Optional[Any] = os.path.join(root , '''__init__.py''' )
lowercase__ : Optional[int] = parse_init(__lowerCamelCase )
if objects is not None:
lowercase__ : str = analyze_results(*__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
lowercase__ : Any = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(__lowerCamelCase ) )
if len(__lowerCamelCase ) > 0:
raise ValueError('''\n\n'''.join(__lowerCamelCase ) )
def __UpperCAmelCase ( ) -> List[str]:
lowercase__ : int = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
                directories.remove(folder )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
continue
            lowercase__ : Union[str, Any] = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
lowercase__ : Union[str, Any] = short_path.replace(os.path.sep , '''.''' )
submodules.append(__lowerCamelCase )
for fname in files:
if fname == "__init__.py":
continue
            lowercase__ : int = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
lowercase__ : Tuple = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(__lowerCamelCase )
return submodules
lowerCAmelCase_ = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def __UpperCAmelCase ( ) -> Optional[int]:
# This is to make sure the transformers module imported is the one in the repo.
lowercase__ : Optional[Any] = importlib.util.spec_from_file_location(
        '''transformers''' , os.path.join(PATH_TO_TRANSFORMERS , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowercase__ : Union[str, Any] = spec.loader.load_module()
lowercase__ : Optional[int] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__lowerCamelCase ) > 0:
lowercase__ : int = '''\n'''.join(f"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
f"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
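    # A hedged illustration of the duplicate check used by analyze_results above:
    # collections.Counter flags any object registered more than once in an init.
    assert [k for k, v in collections.Counter(["A", "B", "A"]).items() if v > 1] == ["A"]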
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowerCAmelCase_ = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowerCAmelCase_ = 'UperNetConfig'
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : int ,_snake_case : int ,_snake_case : Union[int, Tuple[int, int]] ,_snake_case : Union[int, Tuple[int, int], str] = 0 ,_snake_case : bool = False ,_snake_case : Union[int, Tuple[int, int]] = 1 ,) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[int] = nn.Convad(
in_channels=_snake_case ,out_channels=_snake_case ,kernel_size=_snake_case ,padding=_snake_case ,bias=_snake_case ,dilation=_snake_case ,)
lowercase__ : Tuple = nn.BatchNormad(_snake_case )
lowercase__ : List[str] = nn.ReLU()
def UpperCAmelCase ( self : str ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.conv(_snake_case )
lowercase__ : List[str] = self.batch_norm(_snake_case )
lowercase__ : Tuple = self.activation(_snake_case )
return output
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : int ,_snake_case : int ,_snake_case : int ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = [
nn.AdaptiveAvgPoolad(_snake_case ),
UperNetConvModule(_snake_case ,_snake_case ,kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_snake_case ) ,_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Any = input
for layer in self.layers:
lowercase__ : int = layer(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : Tuple[int, ...] ,_snake_case : int ,_snake_case : int ,_snake_case : bool ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : int = pool_scales
lowercase__ : Dict = align_corners
lowercase__ : Optional[Any] = in_channels
lowercase__ : Optional[Any] = channels
lowercase__ : int = []
for i, pool_scale in enumerate(_snake_case ):
lowercase__ : Optional[Any] = UperNetPyramidPoolingBlock(pool_scale=_snake_case ,in_channels=_snake_case ,channels=_snake_case )
self.blocks.append(_snake_case )
self.add_module(str(_snake_case ) ,_snake_case )
def UpperCAmelCase ( self : Any ,_snake_case : torch.Tensor ) -> List[torch.Tensor]:
"""simple docstring"""
lowercase__ : int = []
for ppm in self.blocks:
lowercase__ : Any = ppm(_snake_case )
lowercase__ : int = nn.functional.interpolate(
_snake_case ,size=x.size()[2:] ,mode='''bilinear''' ,align_corners=self.align_corners )
ppm_outs.append(_snake_case )
return ppm_outs
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
super().__init__()
lowercase__ : str = config
lowercase__ : Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
lowercase__ : Optional[Any] = in_channels
lowercase__ : Any = config.hidden_size
lowercase__ : Optional[Any] = False
lowercase__ : Optional[int] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 )
# PSP Module
lowercase__ : Dict = UperNetPyramidPoolingModule(
self.pool_scales ,self.in_channels[-1] ,self.channels ,align_corners=self.align_corners ,)
lowercase__ : str = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
# FPN Module
lowercase__ : Any = nn.ModuleList()
lowercase__ : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowercase__ : List[Any] = UperNetConvModule(_snake_case ,self.channels ,kernel_size=1 )
lowercase__ : Optional[int] = UperNetConvModule(self.channels ,self.channels ,kernel_size=3 ,padding=1 )
self.lateral_convs.append(_snake_case )
self.fpn_convs.append(_snake_case )
lowercase__ : int = UperNetConvModule(
len(self.in_channels ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.apply(self._init_weights )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : Dict = inputs[-1]
lowercase__ : Optional[int] = [x]
psp_outs.extend(self.psp_modules(_snake_case ) )
lowercase__ : Optional[Any] = torch.cat(_snake_case ,dim=1 )
lowercase__ : List[str] = self.bottleneck(_snake_case )
return output
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_snake_case ) )
# build top-down path
lowercase__ : List[Any] = len(_snake_case )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Union[str, Any] = laterals[i - 1].shape[2:]
lowercase__ : int = laterals[i - 1] + nn.functional.interpolate(
laterals[i] ,size=_snake_case ,mode='''bilinear''' ,align_corners=self.align_corners )
# build outputs
lowercase__ : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Any = nn.functional.interpolate(
fpn_outs[i] ,size=fpn_outs[0].shape[2:] ,mode='''bilinear''' ,align_corners=self.align_corners )
lowercase__ : Any = torch.cat(_snake_case ,dim=1 )
lowercase__ : Any = self.fpn_bottleneck(_snake_case )
lowercase__ : str = self.classifier(_snake_case )
return output
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : List[Any] ,_snake_case : int = 2 ,_snake_case : int = 3 ,_snake_case : Union[int, Tuple[int, int]] = 1 ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : int = config
lowercase__ : Dict = config.auxiliary_in_channels
lowercase__ : Optional[int] = config.auxiliary_channels
lowercase__ : List[Any] = config.auxiliary_num_convs
lowercase__ : List[Any] = config.auxiliary_concat_input
lowercase__ : str = in_index
lowercase__ : Any = (kernel_size // 2) * dilation
lowercase__ : Optional[Any] = []
convs.append(
UperNetConvModule(
self.in_channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
if self.num_convs == 0:
lowercase__ : List[str] = nn.Identity()
else:
lowercase__ : Dict = nn.Sequential(*_snake_case )
if self.concat_input:
lowercase__ : int = UperNetConvModule(
self.in_channels + self.channels ,self.channels ,kernel_size=_snake_case ,padding=kernel_size // 2 )
lowercase__ : List[str] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.apply(self._init_weights )
def UpperCAmelCase ( self : List[Any] ,_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : str = encoder_hidden_states[self.in_index]
lowercase__ : List[str] = self.convs(_snake_case )
if self.concat_input:
lowercase__ : Any = self.conv_cat(torch.cat([hidden_states, output] ,dim=1 ) )
lowercase__ : Dict = self.classifier(_snake_case )
return output
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Any = UperNetConfig
lowerCAmelCase : str = "pixel_values"
lowerCAmelCase : Dict = True
def UpperCAmelCase ( self : int ,_snake_case : str ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def UpperCAmelCase ( self : int ,_snake_case : str ,_snake_case : str=False ) -> List[str]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[Any] = value
lowerCAmelCase_ = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." ,A_ ,)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps
        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
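# Minimal inference sketch for the class above. This is illustrative only: the
# "openmmlab/upernet-convnext-tiny" checkpoint name and the AutoImageProcessor
# convenience import are assumptions, not taken from this file.
if __name__ == "__main__":
    import requests
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (batch, num_labels, height, width)
    segmentation = logits.argmax(dim=1)  # per-pixel class indices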
| 302
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model and processor of {model_name} to the hub...""")
        model.push_to_hub(f"""{model_name}""")
        processor.push_to_hub(f"""{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
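# Example invocation of this conversion script (the filename is an assumption;
# --model_name must be one of the keys of `model_name_to_url` above):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted
#
# Add --push_to_hub to also upload the converted model and processor.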
| 302
|
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. '
    B'\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B'H\003'
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['_TRAINERSPEC']._serialized_start = 45
    _globals['_TRAINERSPEC']._serialized_end = 1581
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_start = 1517
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_end = 1570
    _globals['_NORMALIZERSPEC']._serialized_start = 1584
    _globals['_NORMALIZERSPEC']._serialized_end = 1793
    _globals['_SELFTESTDATA']._serialized_start = 1795
    _globals['_SELFTESTDATA']._serialized_end = 1916
    _globals['_SELFTESTDATA_SAMPLE']._serialized_start = 1864
    _globals['_SELFTESTDATA_SAMPLE']._serialized_end = 1905
    _globals['_MODELPROTO']._serialized_start = 1919
    _globals['_MODELPROTO']._serialized_end = 2429
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_start = 2208
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_end = 2418
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start = 2323
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
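# Minimal sketch of reading a trained SentencePiece model with the generated
# bindings above. `ModelProto` is injected into this module's namespace by the
# builder calls (as in the upstream generated file); the path is a placeholder.
if __name__ == "__main__":
    model = ModelProto()
    with open("tokenizer.model", "rb") as f:  # path is illustrative
        model.ParseFromString(f.read())
    print(model.trainer_spec.vocab_size, len(model.pieces))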
| 302
| 1
|
"""simple docstring"""
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A vertex may take `color` only if no adjacent vertex already has it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
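# Usage sketch for the backtracking colorer above: `graph` is an adjacency
# matrix, so graph[i][j] == 1 means vertices i and j are adjacent.
if __name__ == "__main__":
    graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(graph, 3))  # -> [0, 1, 0, 1, 0]; [] would mean 3 colors do not suffice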
| 302
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
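# With the `_LazyModule` wiring above, heavy submodules are imported only on
# first attribute access, e.g. (sketch):
#
#   from transformers.models.pix2struct import Pix2StructConfig  # triggers the lazy import
#
# while the TYPE_CHECKING branch gives static type checkers the same names
# without paying the import cost at runtime.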
| 302
| 1
|
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None
    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None
    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])
    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            # Strings are immutable, so rebuild the sequence with unknown
            # residues replaced by "X" rather than assigning in place.
            seq = "".join(res if res in residue_constants.restypes else "X" for res in seq)
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents, parents_chain_index) if i == chain_id]
    if parents is None or len(parents) == 0:
        parents = ["N/A"]
    pdb_headers.append(f"PARENT {' '.join(parents)}")
    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add PDB headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)
            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))
    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))
    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types
    pdb_lines: List[str] = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")
    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue
            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""
            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask, i.e. the `atom_mask` implied by `aatype` alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
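# Typical round trip with the helpers above (a sketch; `model_features` and
# `model_outputs` are placeholders for values produced by a real pipeline):
#
#   protein = from_prediction(features=model_features, result=model_outputs)
#   with open("prediction.pdb", "w") as f:
#       f.write(to_pdb(protein))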
| 302
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 302
| 1
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encode_decode(self):
        tokenizer = self.get_rust_tokenizer()
        input_sentences = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        expected_tokens = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(input_sentences)["input_ids"]
        self.assertListEqual(expected_tokens, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, input_sentences)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 302
|
"""simple docstring"""
from __future__ import annotations
END = "#"
class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        """Inserts `text` into the trie, marking the word end with END."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        """Returns the suffixes of all stored words starting with `prefix`."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
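    # For the words inserted above, find_word("de") yields the suffixes of every
    # word starting with "de" (each terminated by the END-marker space), so the
    # autocomplete call in main() prints, in insertion order:
    #   ('depart ', 'detergent ', 'deer ', 'deal ')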
| 302
| 1
|
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Counts the simple paths from (row, col) to the bottom-right cell of
    `grid`, moving in the four cardinal directions without revisiting a cell;
    cells containing 1 are blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
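    # Worked example: a fully open 2x2 grid has exactly two simple paths from
    # (0, 0) to (1, 1) -- right-then-down and down-then-right.
    assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2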
| 302
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
'''simple docstring'''
def __init__( self : int ,_snake_case : int ,_snake_case : int ,_snake_case : int = 3 ,_snake_case : int = 1 ,_snake_case : int = 1 ,_snake_case : Optional[str] = "relu" ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = nn.Convad(
_snake_case ,_snake_case ,kernel_size=_snake_case ,stride=_snake_case ,padding=kernel_size // 2 ,groups=_snake_case ,bias=_snake_case ,)
lowercase__ : List[Any] = nn.BatchNormad(_snake_case )
lowercase__ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.convolution(_snake_case )
lowercase__ : Tuple = self.normalization(_snake_case )
lowercase__ : Tuple = self.activation(_snake_case )
return hidden_state
class RegNetEmbeddings(nn.Module):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : RegNetConfig ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
lowercase__ : str = config.num_channels
def UpperCAmelCase ( self : int ,_snake_case : Dict ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
lowercase__ : Optional[int] = self.embedder(_snake_case )
return hidden_state
class RegNetShortCut(nn.Module):
'''simple docstring'''
def __init__( self : str ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ) -> Any:
"""simple docstring"""
super().__init__()
lowercase__ : List[str] = nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ,stride=_snake_case ,bias=_snake_case )
lowercase__ : Any = nn.BatchNormad(_snake_case )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ) -> Tensor:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.convolution(_snake_case )
lowercase__ : Optional[int] = self.normalization(_snake_case )
return hidden_state
class RegNetSELayer(nn.Module):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : int ,_snake_case : int ) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ : Any = nn.AdaptiveAvgPoolad((1, 1) )
lowercase__ : Dict = nn.Sequential(
nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.Sigmoid() ,)
def UpperCAmelCase ( self : int ,_snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.pooler(_snake_case )
lowercase__ : Union[str, Any] = self.attention(_snake_case )
lowercase__ : List[str] = hidden_state * attention
return hidden_state
class RegNetXLayer(nn.Module):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> List[str]:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = in_channels != out_channels or stride != 1
lowercase__ : Optional[int] = max(1 ,out_channels // config.groups_width )
lowercase__ : str = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : Optional[int] = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
lowercase__ : str = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = hidden_state
lowercase__ : Union[str, Any] = self.layer(_snake_case )
lowercase__ : List[Any] = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : Optional[int] = self.activation(_snake_case )
return hidden_state
class RegNetYLayer(nn.Module):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = in_channels != out_channels or stride != 1
lowercase__ : List[str] = max(1 ,out_channels // config.groups_width )
lowercase__ : Tuple = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : str = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetSELayer(_snake_case ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
lowercase__ : Optional[Any] = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : str = hidden_state
lowercase__ : Optional[Any] = self.layer(_snake_case )
lowercase__ : int = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : str = self.activation(_snake_case )
return hidden_state
class RegNetStage(nn.Module):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ,_snake_case : int = 2 ,) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[Any] = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
lowercase__ : Optional[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_snake_case ,_snake_case ,_snake_case ,stride=_snake_case ,) ,*[layer(_snake_case ,_snake_case ,_snake_case ) for _ in range(depth - 1 )] ,)
def UpperCAmelCase ( self : Tuple ,_snake_case : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.layers(_snake_case )
return hidden_state
class RegNetEncoder(nn.Module):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : RegNetConfig ) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : str = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_snake_case ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
lowercase__ : str = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_snake_case ,config.depths[1:] ):
self.stages.append(RegNetStage(_snake_case ,_snake_case ,_snake_case ,depth=_snake_case ) )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ,_snake_case : bool = False ,_snake_case : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ : int = hidden_states + (hidden_state,)
lowercase__ : Any = stage_module(_snake_case )
if output_hidden_states:
lowercase__ : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case ,hidden_states=_snake_case )
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
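# Illustrative usage of the classification model above. "facebook/regnet-y-040"
# is the checkpoint already referenced by the docstring constants; the
# AutoImageProcessor convenience import is an assumption for this sketch.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained(_IMAGE_CLASS_CHECKPOINT)
    model = RegNetForImageClassification.from_pretrained(_IMAGE_CLASS_CHECKPOINT)
    image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])  # expected: "tabby, tabby cat"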
| 302
| 1
|
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Probability of exactly `successes` successes in `trials` Bernoulli
    trials with per-trial success probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 0 - 1")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
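    # Sanity check of the closed form C(n, k) * p**k * (1 - p)**(n - k):
    # C(4, 2) = 6, so the value above is 6 * 0.75**2 * 0.25**2 = 0.2109375.
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12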
| 302
|
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Given any two of conductivity, electron concentration and mobility (pass
    the unknown one as 0), solve sigma = n * e * mu for the missing quantity."""
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
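    # Worked example (toy numbers): with conductivity = 100 and electron_conc = 25
    # (mobility passed as 0 to mark it as the unknown), the function solves for
    # mobility = conductivity / (electron_conc * ELECTRON_CHARGE) ~= 2.497e19.
    name, value = carrier_concentration(conductivity=100, electron_conc=25, mobility=0)
    print(name, value)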
| 302
| 1
|
"""simple docstring"""
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only check the anchor tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])
# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)
    # Initialize the parser
    parser = Parser(domain)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text)
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(F'''{len(emails)} emails found:''')
print('\n'.join(sorted(emails)))
| 302
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = ["pixel_values"]
def __init__( self : Tuple ,_snake_case : bool = True ,_snake_case : Optional[Dict[str, int]] = None ,_snake_case : PILImageResampling = PILImageResampling.BICUBIC ,_snake_case : bool = True ,_snake_case : bool = True ,_snake_case : Union[int, float] = 1 / 255 ,_snake_case : Dict[str, int] = None ,_snake_case : bool = True ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,**_snake_case : Optional[Any] ,) -> None:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : str = size if size is not None else {'''height''': 224, '''width''': 224}
lowercase__ : Optional[int] = get_size_dict(_snake_case )
lowercase__ : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase__ : Optional[int] = get_size_dict(_snake_case ,default_to_square=_snake_case ,param_name='''crop_size''' )
lowercase__ : Tuple = do_resize
lowercase__ : List[Any] = do_rescale
lowercase__ : Any = do_normalize
lowercase__ : List[str] = do_center_crop
lowercase__ : Optional[Any] = crop_size
lowercase__ : Union[str, Any] = size
lowercase__ : Any = resample
lowercase__ : int = rescale_factor
lowercase__ : Tuple = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase__ : str = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self : str ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : PILImageResampling = PILImageResampling.BILINEAR ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Dict ,) -> np.ndarray:
"""simple docstring"""
lowercase__ : List[str] = get_size_dict(_snake_case )
if "shortest_edge" in size:
lowercase__ : str = get_resize_output_image_size(_snake_case ,size=size['''shortest_edge'''] ,default_to_square=_snake_case )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
lowercase__ : int = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : List[Any] ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Tuple ,) -> np.ndarray:
"""simple docstring"""
lowercase__ : Optional[Any] = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_snake_case ,size=(size['''height'''], size['''width''']) ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : np.ndarray ,_snake_case : float ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Optional[int] ) -> np.ndarray:
"""simple docstring"""
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : np.ndarray ,_snake_case : Union[float, List[float]] ,_snake_case : Union[float, List[float]] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Dict ,) -> np.ndarray:
"""simple docstring"""
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : ImageInput ,_snake_case : Optional[bool] = None ,_snake_case : Dict[str, int] = None ,_snake_case : PILImageResampling = None ,_snake_case : bool = None ,_snake_case : int = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[float] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[str, TensorType]] = None ,_snake_case : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**_snake_case : List[str] ,) -> BatchFeature:
"""simple docstring"""
lowercase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : int = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
lowercase__ : Tuple = get_size_dict(_snake_case ,param_name='''crop_size''' ,default_to_square=_snake_case )
lowercase__ : Tuple = resample if resample is not None else self.resample
lowercase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowercase__ : List[str] = image_std if image_std is not None else self.image_std
lowercase__ : Optional[int] = size if size is not None else self.size
lowercase__ : int = get_size_dict(_snake_case )
if not is_batched(_snake_case ):
lowercase__ : Optional[Any] = [images]
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowercase__ : str = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
lowercase__ : int = [self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case ) for image in images]
if do_center_crop:
lowercase__ : str = [self.center_crop(image=_snake_case ,size=_snake_case ) for image in images]
if do_rescale:
lowercase__ : Optional[Any] = [self.rescale(image=_snake_case ,scale=_snake_case ) for image in images]
if do_normalize:
lowercase__ : List[str] = [self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(_snake_case ,_snake_case ) for image in images]
lowercase__ : Any = {'''pixel_values''': images}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
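# The preprocess method above applies the classic ImageNet-style recipe in a fixed
# order: resize -> center crop -> rescale -> normalize -> channel-first. A
# numpy-only sketch of that ordering (resize omitted for brevity; the mean/std
# values match IMAGENET_DEFAULT_MEAN and IMAGENET_DEFAULT_STD):
def _toy_preprocess(image: np.ndarray, crop: int = 224) -> np.ndarray:
    height, width, _ = image.shape  # expects an (H, W, 3) uint8 array
    top, left = (height - crop) // 2, (width - crop) // 2
    cropped = image[top : top + crop, left : left + crop]  # center crop
    rescaled = cropped.astype(np.float32) * (1 / 255)  # rescale to [0, 1]
    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
    normalized = (rescaled - mean) / std  # per-channel normalize
    return normalized.transpose(2, 0, 1)  # HWC -> CHW, i.e. ChannelDimension.FIRST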
| 302
| 1
|
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar('T')
class __A ( Generic[T] ):
'''simple docstring'''
def __init__( self : str ,_snake_case : bool = True ) -> None:
"""simple docstring"""
lowercase__ : dict[T, list[T]] = {} # dictionary of lists
lowercase__ : Dict = directed
def UpperCAmelCase ( self : Any ,_snake_case : T ,_snake_case : T ) -> GraphAdjacencyList[T]:
"""simple docstring"""
if not self.directed: # For undirected graphs
            # if both the source vertex and the destination vertex are present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
self.adj_list[destination_vertex].append(_snake_case )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
lowercase__ : Dict = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_snake_case )
lowercase__ : Union[str, Any] = [destination_vertex]
            # if neither the source vertex nor the destination vertex is present in the
            # adjacency list, create a new vertex with source vertex as key and assign a
            # list containing the destination vertex as its first adjacent vertex. Also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
lowercase__ : List[Any] = [destination_vertex]
lowercase__ : Optional[int] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_snake_case )
lowercase__ : Optional[int] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
lowercase__ : Optional[int] = [destination_vertex]
            # if neither the source vertex nor the destination vertex is present in the
            # adjacency list, create a new vertex with source vertex as key and a list
            # containing the destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
lowercase__ : List[str] = [destination_vertex]
lowercase__ : str = []
return self
def __repr__( self : Optional[Any] ) -> str:
"""simple docstring"""
return pformat(self.adj_list )
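# A self-contained sketch of the same undirected bookkeeping with a plain dict,
# showing why every edge touches both adjacency lists (illustrative only):
def _demo_undirected() -> dict[int, list[int]]:
    adj: dict[int, list[int]] = {}
    for u, v in [(0, 1), (1, 2), (2, 0)]:
        adj.setdefault(u, []).append(v)  # destination into source's list
        adj.setdefault(v, []).append(u)  # and source into destination's list
    return adj
assert _demo_undirected() == {0: [1, 2], 1: [0, 2], 2: [1, 0]}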
| 302
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
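# A hedged sketch of the lazy-import mechanism used above: only the structure
# mapping is read eagerly, and each symbol triggers the real import on first
# attribute access (a simplified stand-in for _LazyModule, not its actual code):
#
#     import importlib
#     import types
#     class _Lazy(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._where = {sym: mod for mod, syms in import_structure.items() for sym in syms}
#         def __getattr__(self, attr):
#             submodule = importlib.import_module("." + self._where[attr], self.__name__)
#             return getattr(submodule, attr)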
| 302
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int:
    return 1 if input_a == input_b else 0
def __UpperCAmelCase ( ) -> None:
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
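# For 0/1 inputs XNOR is the complement of XOR, so a bitwise one-liner yields the
# same truth table (self-contained sketch):
def _xnor_bitwise(input_a: int, input_b: int) -> int:
    return 1 - (input_a ^ input_b)
assert all(_xnor_bitwise(a, b) == (1 if a == b else 0) for a in (0, 1) for b in (0, 1))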
| 302
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = "cpu" , __lowerCamelCase = None ) -> None:
lowercase__ : List[str] = torch.load(__lowerCamelCase , map_location=__lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCamelCase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
lowercase__ : List[Any] = v.half()
if save_path is None: # overwrite src_path
lowercase__ : Any = src_path
torch.save(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
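# Typical invocation through fire, assuming the script is saved as fp16_convert.py
# (the filename is illustrative); halving every tensor roughly halves the
# checkpoint on disk, and the result loads back with a plain torch.load:
#
#     python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin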
| 302
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar('T')
class __A ( Generic[T] ):
'''simple docstring'''
def __init__( self : List[Any] ,_snake_case : T ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = data
lowercase__ : Node[T] | None = None
def __str__( self : List[Any] ) -> str:
"""simple docstring"""
return f"""{self.data}"""
class __A ( Generic[T] ):
'''simple docstring'''
def __init__( self : int ) -> None:
"""simple docstring"""
lowercase__ : Node[T] | None = None
def __iter__( self : Tuple ) -> Iterator[T]:
"""simple docstring"""
lowercase__ : Tuple = self.top
while node:
yield node.data
lowercase__ : Optional[Any] = node.next
def __str__( self : int ) -> str:
"""simple docstring"""
return "->".join([str(_snake_case ) for item in self] )
def __len__( self : Dict ) -> int:
"""simple docstring"""
return len(tuple(iter(self ) ) )
def UpperCAmelCase ( self : List[str] ) -> bool:
"""simple docstring"""
return self.top is None
def UpperCAmelCase ( self : int ,_snake_case : T ) -> None:
"""simple docstring"""
lowercase__ : Optional[int] = Node(_snake_case )
if not self.is_empty():
lowercase__ : Tuple = self.top
lowercase__ : Union[str, Any] = node
def UpperCAmelCase ( self : List[str] ) -> T:
"""simple docstring"""
if self.is_empty():
raise IndexError('''pop from empty stack''' )
assert isinstance(self.top ,_snake_case )
lowercase__ : List[str] = self.top
lowercase__ : Union[str, Any] = self.top.next
return pop_node.data
def UpperCAmelCase ( self : Dict ) -> T:
"""simple docstring"""
if self.is_empty():
raise IndexError('''peek from empty stack''' )
assert self.top is not None
return self.top.data
def UpperCAmelCase ( self : Tuple ) -> None:
"""simple docstring"""
lowercase__ : Any = None
if __name__ == "__main__":
from doctest import testmod
testmod()
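# A quick behavioural sketch of the LIFO contract above, written against the
# built-in list for comparison (illustrative only):
_stack: list[int] = []
for _item in (1, 2, 3):
    _stack.append(_item)  # push
assert _stack[-1] == 3  # peek sees the most recent push
assert _stack.pop() == 3  # pop returns and removes it
assert len(_stack) == 2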
| 302
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : UNetaDModel
lowerCAmelCase : ScoreSdeVeScheduler
def __init__( self : Optional[Any] ,_snake_case : UNetaDModel ,_snake_case : ScoreSdeVeScheduler ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_snake_case ,scheduler=_snake_case )
@torch.no_grad()
def __call__( self : Any ,_snake_case : int = 1 ,_snake_case : int = 2_000 ,_snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_snake_case : Optional[str] = "pil" ,_snake_case : bool = True ,**_snake_case : Any ,) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.unet.config.sample_size
lowercase__ : Dict = (batch_size, 3, img_size, img_size)
lowercase__ : Tuple = self.unet
lowercase__ : Any = randn_tensor(_snake_case ,generator=_snake_case ) * self.scheduler.init_noise_sigma
lowercase__ : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(_snake_case )
self.scheduler.set_sigmas(_snake_case )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[str] = self.unet(_snake_case ,_snake_case ).sample
lowercase__ : Optional[Any] = self.scheduler.step_correct(_snake_case ,_snake_case ,generator=_snake_case ).prev_sample
# prediction step
lowercase__ : str = model(_snake_case ,_snake_case ).sample
lowercase__ : List[Any] = self.scheduler.step_pred(_snake_case ,_snake_case ,_snake_case ,generator=_snake_case )
lowercase__ , lowercase__ : Optional[int] = output.prev_sample, output.prev_sample_mean
lowercase__ : Union[str, Any] = sample_mean.clamp(0 ,1 )
lowercase__ : int = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowercase__ : Any = self.numpy_to_pil(_snake_case )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_snake_case )
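# A hedged usage sketch, assuming this class is exposed as diffusers'
# ScoreSdeVePipeline (the predictor-corrector loop above matches that sampler);
# the checkpoint id is illustrative:
#
#     from diffusers import ScoreSdeVePipeline
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#     image = pipe(num_inference_steps=2_000).images[0]
#     image.save("sde_ve_sample.png")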
| 302
| 1
|
"""simple docstring"""
lowerCAmelCase_ = {}
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowercase__ : Dict = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowercase__ : str = _calculate(days - 1 , __lowerCamelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowercase__ : int = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowercase__ : Tuple = _calculate(days - 1 , __lowerCamelCase , 0 )
lowercase__ : int = state_late + state_absent + state_ontime
lowercase__ : List[Any] = prizestrings
return prizestrings
def __UpperCAmelCase ( __lowerCamelCase = 30 ) -> int:
return _calculate(__lowerCamelCase , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
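# Sanity check against Project Euler 191: over a 3-day period there are
# 3**3 = 27 raw strings, of which 8 are ruled out (7 with two or more absences
# plus the single triple-late run "LLL"), so the function above should report:
#
#     _calculate(3, absent=0, late=0)  # -> 19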
| 302
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = "maskformer"
lowerCAmelCase : Any = {"hidden_size": "mask_feature_size"}
lowerCAmelCase : Optional[int] = ["resnet", "swin"]
lowerCAmelCase : str = ["detr"]
def __init__( self : int ,_snake_case : int = 256 ,_snake_case : int = 256 ,_snake_case : float = 0.1 ,_snake_case : bool = False ,_snake_case : Optional[Dict] = None ,_snake_case : Optional[Dict] = None ,_snake_case : float = 0.02 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 20.0 ,_snake_case : Optional[bool] = None ,**_snake_case : Optional[Any] ,) -> Dict:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase__ : Any = SwinConfig(
image_size=384 ,in_channels=3 ,patch_size=4 ,embed_dim=128 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ,)
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[str] = backbone_config.pop('''model_type''' )
lowercase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(_snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase__ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
lowercase__ : Tuple = (
decoder_config.pop('''model_type''' ) if isinstance(_snake_case ,_snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(_snake_case ,_snake_case ):
lowercase__ : Optional[int] = CONFIG_MAPPING[decoder_type]
lowercase__ : Optional[Any] = config_class.from_dict(_snake_case )
lowercase__ : List[Any] = backbone_config
lowercase__ : List[Any] = decoder_config
# main feature dimension for the model
lowercase__ : List[str] = fpn_feature_size
lowercase__ : int = mask_feature_size
# initializer
lowercase__ : str = init_std
lowercase__ : str = init_xavier_std
# Hungarian matcher && loss
lowercase__ : Optional[int] = cross_entropy_weight
lowercase__ : List[Any] = dice_weight
lowercase__ : List[str] = mask_weight
lowercase__ : str = use_auxiliary_loss
lowercase__ : Optional[int] = no_object_weight
lowercase__ : Optional[Any] = output_auxiliary_logits
lowercase__ : Optional[Any] = self.decoder_config.encoder_attention_heads
lowercase__ : Optional[Any] = self.decoder_config.num_hidden_layers
super().__init__(**_snake_case )
@classmethod
def UpperCAmelCase ( cls : Any ,_snake_case : PretrainedConfig ,_snake_case : PretrainedConfig ,**_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return cls(
backbone_config=_snake_case ,decoder_config=_snake_case ,**_snake_case ,)
def UpperCAmelCase ( self : str ) -> Dict[str, any]:
"""simple docstring"""
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : List[Any] = self.decoder_config.to_dict()
lowercase__ : List[str] = self.__class__.model_type
return output
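# A hedged construction sketch, assuming the class is transformers'
# MaskFormerConfig: with no arguments it falls back to a Swin backbone and a
# DETR decoder, and the to_dict method above serialises both nested configs:
#
#     config = MaskFormerConfig()
#     assert config.backbone_config.model_type == "swin"
#     assert config.decoder_config.model_type == "detr"
#     blob = config.to_dict()  # nested configs become plain dicts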
| 302
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Dict = KandinskyVaaPriorPipeline
lowerCAmelCase : Any = ["prompt"]
lowerCAmelCase : Optional[Any] = ["prompt", "negative_prompt"]
lowerCAmelCase : Optional[int] = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowerCAmelCase : Union[str, Any] = False
@property
def UpperCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return 32
@property
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
return 100
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModelWithProjection(_snake_case )
@property
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : List[Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
lowercase__ : Dict = PriorTransformer(**_snake_case )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't
lowercase__ : List[str] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=224 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=14 ,)
lowercase__ : Optional[Any] = CLIPVisionModelWithProjection(_snake_case )
return model
@property
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ : Union[str, Any] = CLIPImageProcessor(
crop_size=224 ,do_center_crop=_snake_case ,do_normalize=_snake_case ,do_resize=_snake_case ,image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] ,image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] ,resample=3 ,size=224 ,)
return image_processor
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.dummy_prior
lowercase__ : Union[str, Any] = self.dummy_image_encoder
lowercase__ : Optional[int] = self.dummy_text_encoder
lowercase__ : Optional[Any] = self.dummy_tokenizer
lowercase__ : Union[str, Any] = self.dummy_image_processor
lowercase__ : Tuple = UnCLIPScheduler(
variance_type='''fixed_small_log''' ,prediction_type='''sample''' ,num_train_timesteps=1_000 ,clip_sample=_snake_case ,clip_sample_range=10.0 ,)
lowercase__ : Dict = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Tuple=0 ) -> int:
"""simple docstring"""
if str(_snake_case ).startswith('''mps''' ):
lowercase__ : Optional[int] = torch.manual_seed(_snake_case )
else:
lowercase__ : Any = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : Tuple = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = '''cpu'''
lowercase__ : Optional[Any] = self.get_dummy_components()
lowercase__ : List[Any] = self.pipeline_class(**_snake_case )
lowercase__ : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Optional[int] = pipe(**self.get_dummy_inputs(_snake_case ) )
lowercase__ : Any = output.image_embeds
lowercase__ : List[str] = pipe(
**self.get_dummy_inputs(_snake_case ) ,return_dict=_snake_case ,)[0]
lowercase__ : str = image[0, -10:]
lowercase__ : int = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
lowercase__ : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
lowercase__ : int = torch_device == '''cpu'''
lowercase__ : Any = True
lowercase__ : str = False
self._test_inference_batch_single_identical(
test_max_difference=_snake_case ,relax_max_difference=_snake_case ,test_mean_pixel_difference=_snake_case ,)
@skip_mps
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Tuple = torch_device == '''cpu'''
lowercase__ : Optional[int] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_snake_case ,test_mean_pixel_difference=_snake_case ,)
| 302
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
lowercase__ : int = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowercase__ : Dict = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ : Optional[int] = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ : List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : Dict = [3, 3, 3, 3]
lowercase__ : str = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : List[str] = [4, 4, 4, 4]
lowercase__ : Any = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : List[str] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : List[str] = [3, 3, 3, 3]
else:
lowercase__ : Optional[Any] = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[int] = 96
elif "small" in model_name:
lowercase__ : Union[str, Any] = 96
elif "base" in model_name:
lowercase__ : Tuple = 1_28
elif "large" in model_name:
lowercase__ : Any = 1_92
elif "xlarge" in model_name:
lowercase__ : Any = 2_56
elif "huge" in model_name:
lowercase__ : Union[str, Any] = 3_52
# set label information
lowercase__ : List[Any] = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowercase__ : Optional[int] = '''imagenet-22k-id2label.json'''
else:
lowercase__ : Optional[Any] = '''imagenet-1k-id2label.json'''
lowercase__ : Dict = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
lowercase__ : int = FocalNetConfig(
embed_dim=__lowerCamelCase , depths=__lowerCamelCase , focal_levels=__lowerCamelCase , focal_windows=__lowerCamelCase , use_conv_embed=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , use_post_layernorm=__lowerCamelCase , use_layerscale=__lowerCamelCase , )
return config
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
if "patch_embed.proj" in name:
lowercase__ : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase__ : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowercase__ : Dict = '''encoder.''' + name
if "encoder.layers" in name:
lowercase__ : Tuple = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowercase__ : Union[str, Any] = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowercase__ : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Dict = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Dict = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowercase__ : Dict = '''layernorm.weight'''
if name == "norm.bias":
lowercase__ : Dict = '''layernorm.bias'''
if "head" in name:
lowercase__ : Dict = name.replace('''head''' , '''classifier''' )
else:
lowercase__ : List[Any] = '''focalnet.''' + name
return name
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> List[str]:
# fmt: off
lowercase__ : Any = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowercase__ : Optional[int] = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , __lowerCamelCase )
lowercase__ : str = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowercase__ : int = state_dict.pop(__lowerCamelCase )
lowercase__ : Any = val
lowercase__ : List[Any] = get_focalnet_config(__lowerCamelCase )
lowercase__ : Optional[int] = FocalNetForImageClassification(__lowerCamelCase )
model.eval()
# load state dict
model.load_state_dict(__lowerCamelCase )
# verify conversion
lowercase__ : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ : int = BitImageProcessor(
do_resize=__lowerCamelCase , size={'''shortest_edge''': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase , crop_size=2_24 , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , )
lowercase__ : str = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
lowercase__ : List[str] = processor(images=__lowerCamelCase , return_tensors='''pt''' )
lowercase__ : List[str] = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowercase__ : Optional[Any] = image_transforms(__lowerCamelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , __lowerCamelCase , atol=1E-4 )
lowercase__ : Optional[Any] = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Union[str, Any] = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowercase__ : Optional[int] = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Dict = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowercase__ : List[str] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowercase__ : List[str] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCAmelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 302
| 1
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[int]:
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : Dict = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : Any = 2
# Initialize accelerator
lowercase__ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : List[Any] = config['''lr''']
lowercase__ : Union[str, Any] = int(config['''num_epochs'''] )
lowercase__ : List[str] = int(config['''seed'''] )
lowercase__ : Any = int(config['''batch_size'''] )
lowercase__ : int = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : str = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : Optional[int] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[str] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : int = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Dict = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Any = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : Union[str, Any] = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
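# A self-contained sketch of the retry contract behind find_executable_batch_size:
# run the wrapped function, and on an out-of-memory style failure halve the batch
# size and try again (a simplified stand-in, not accelerate's implementation).
def _shrink_on_oom(starting_batch_size: int = 128):
    def decorator(fn):
        def wrapper(*fn_args, **fn_kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *fn_args, **fn_kwargs)
                except RuntimeError as err:  # accelerate inspects real CUDA OOM errors
                    if "out of memory" not in str(err).lower():
                        raise
                    batch_size //= 2
            raise RuntimeError("No executable batch size found.")
        return wrapper
    return decorator
@_shrink_on_oom(starting_batch_size=64)
def _toy_train(batch_size: int) -> int:
    if batch_size > 16:  # pretend larger batches exhaust memory
        raise RuntimeError("CUDA out of memory (simulated)")
    return batch_size
assert _toy_train() == 16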
| 302
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = ["image_processor", "tokenizer"]
lowerCAmelCase : int = "ChineseCLIPImageProcessor"
lowerCAmelCase : str = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Tuple ,_snake_case : str=None ,_snake_case : Union[str, Any]=None ,**_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,_snake_case ,)
lowercase__ : Tuple = kwargs.pop('''feature_extractor''' )
lowercase__ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case ,_snake_case )
lowercase__ : List[Any] = self.image_processor
def __call__( self : List[Any] ,_snake_case : Optional[int]=None ,_snake_case : Dict=None ,_snake_case : List[Any]=None ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase__ : str = self.tokenizer(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if images is not None:
lowercase__ : str = self.image_processor(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if text is not None and images is not None:
lowercase__ : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) ,tensor_type=_snake_case )
def UpperCAmelCase ( self : Any ,*_snake_case : List[Any] ,**_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ,*_snake_case : Tuple ,**_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@property
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.tokenizer.model_input_names
lowercase__ : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,_snake_case ,)
return self.image_processor_class
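# A hedged usage sketch, assuming the class is transformers' ChineseCLIPProcessor
# (the checkpoint id is illustrative):
#
#     from transformers import ChineseCLIPProcessor
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     batch = processor(text=["一只猫"], images=image, return_tensors="pt")
#     # batch carries the tokenizer fields plus pixel_values from the image processor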
| 302
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
    'google/vit-base-patch16-224': 'https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Any = "vit"
def __init__( self : List[str] ,_snake_case : str=768 ,_snake_case : Optional[Any]=12 ,_snake_case : Dict=12 ,_snake_case : Dict=3_072 ,_snake_case : int="gelu" ,_snake_case : Optional[int]=0.0 ,_snake_case : List[str]=0.0 ,_snake_case : str=0.02 ,_snake_case : Optional[Any]=1e-12 ,_snake_case : Optional[Any]=224 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=3 ,_snake_case : Optional[int]=True ,_snake_case : Any=16 ,**_snake_case : Optional[int] ,) -> Any:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : Tuple = intermediate_size
lowercase__ : Dict = hidden_act
lowercase__ : str = hidden_dropout_prob
lowercase__ : Optional[int] = attention_probs_dropout_prob
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Tuple = image_size
lowercase__ : str = patch_size
lowercase__ : str = num_channels
lowercase__ : Optional[int] = qkv_bias
lowercase__ : Tuple = encoder_stride
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = version.parse("1.11" )
@property
def UpperCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1e-4
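# The ONNX config above declares one graph input (pixel_values with dynamic
# batch/channel/height/width axes) and a 1e-4 validation tolerance. A hedged
# export sketch with the transformers.onnx CLI (output path illustrative):
#
#     python -m transformers.onnx --model=google/vit-base-patch16-224 vit_onnx/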
| 302
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 302
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
lowercase__ : Any = grid[0]
for row_n in range(1 , len(__lowerCamelCase ) ):
lowercase__ : Optional[int] = grid[row_n]
lowercase__ : Union[str, Any] = fill_row(__lowerCamelCase , __lowerCamelCase )
lowercase__ : str = grid[row_n]
return grid[-1][-1]
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> list:
current_row[0] += row_above[0]
for cell_n in range(1 , len(__lowerCamelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
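# Worked example for the in-place dynamic programme above (moves are right or
# down only; each cell accumulates the cheapest cost to reach it). For the grid
#
#     2 1
#     3 1
#
# the first row becomes the prefix sums [2, 3]; fill_row then turns the second
# row into [2 + 3, 1 + min(5, 3)] = [5, 4], and grid[-1][-1] == 4 matches the
# cheapest path 2 -> 1 -> 1.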
| 302
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default="NER" ,metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase : bool = field(default=A_ ,metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,)
lowerCAmelCase : int = field(
        default=128 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
lowercase__ : str = import_module('''tasks''' )
try:
lowercase__ : List[str] = getattr(__lowerCamelCase , model_args.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase )
# Set seed
set_seed(training_args.seed )
    # Prepare CoNLL-2003 task
lowercase__ : Union[str, Any] = token_classification_task.get_labels(data_args.labels )
lowercase__ : Dict[int, str] = dict(enumerate(__lowerCamelCase ) )
lowercase__ : Optional[int] = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : List[Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , id2label=__lowerCamelCase , label2id={label: i for i, label in enumerate(__lowerCamelCase )} , cache_dir=model_args.cache_dir , )
lowercase__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowercase__ : str = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__lowerCamelCase , __lowerCamelCase ) -> Tuple[List[int], List[int]]:
lowercase__ : Tuple = np.argmax(__lowerCamelCase , axis=2 )
lowercase__ , lowercase__ : Tuple = preds.shape
lowercase__ : List[str] = [[] for _ in range(__lowerCamelCase )]
lowercase__ : Tuple = [[] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__lowerCamelCase ) -> Dict:
lowercase__ , lowercase__ : List[Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__lowerCamelCase , __lowerCamelCase ),
"precision": precision_score(__lowerCamelCase , __lowerCamelCase ),
"recall": recall_score(__lowerCamelCase , __lowerCamelCase ),
"f1": fa_score(__lowerCamelCase , __lowerCamelCase ),
}
# Data collator
    lowercase__ : Tuple = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
lowercase__ : str = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , compute_metrics=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ : int = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ : Optional[int] = trainer.evaluate()
lowercase__ : Union[str, Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__lowerCamelCase )
# Predict
if training_args.do_predict:
lowercase__ : Optional[int] = TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = trainer.predict(__lowerCamelCase )
lowercase__ , lowercase__ : Tuple = align_predictions(__lowerCamelCase , __lowerCamelCase )
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return results
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
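# Hedged usage sketch: a typical invocation of a CoNLL-style NER fine-tuning
# script like the one above (the file name, paths, and model are hypothetical;
# the flags come from the dataclasses and TrainingArguments above):
#
#   python run_ner.py \
#     --model_name_or_path bert-base-cased \
#     --data_dir ./conll2003 \
#     --labels ./conll2003/labels.txt \
#     --output_dir ./ner-output \
#     --max_seq_length 128 \
#     --do_train --do_eval --do_predict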
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = "roberta"
def __init__( self : Dict ,_snake_case : Tuple=50_265 ,_snake_case : Any=768 ,_snake_case : Any=12 ,_snake_case : List[Any]=12 ,_snake_case : int=3_072 ,_snake_case : str="gelu" ,_snake_case : str=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=512 ,_snake_case : Any=2 ,_snake_case : Dict=0.02 ,_snake_case : Tuple=1e-12 ,_snake_case : List[str]=1 ,_snake_case : Optional[Any]=0 ,_snake_case : List[Any]=2 ,_snake_case : Union[str, Any]="absolute" ,_snake_case : Union[str, Any]=True ,_snake_case : int=None ,**_snake_case : List[str] ,) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,bos_token_id=_snake_case ,eos_token_id=_snake_case ,**_snake_case )
lowercase__ : str = vocab_size
lowercase__ : str = hidden_size
lowercase__ : List[str] = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : Dict = hidden_act
lowercase__ : Tuple = intermediate_size
lowercase__ : str = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : Tuple = type_vocab_size
lowercase__ : str = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : str = position_embedding_type
lowercase__ : str = use_cache
lowercase__ : List[str] = classifier_dropout
class __A ( A_ ):
'''simple docstring'''
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase__ : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
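# A minimal sketch of what the `inputs` property above evaluates to for the
# default (non multiple-choice) task; the literal simply mirrors the code path:
#
#   OrderedDict([
#       ("input_ids",      {0: "batch", 1: "sequence"}),
#       ("attention_mask", {0: "batch", 1: "sequence"}),
#   ])
#
# and for "multiple-choice" each axis mapping gains a middle "choice" dimension.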
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[int]:
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : Dict = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
        lowercase__ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
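# A hedged note on the padding choices inside `collate_fn` above: TPUs strongly
# prefer a single static shape, hence padding everything to 128; fp16/bf16
# matmuls hit GPU tensor-core fast paths when the padded length is a multiple
# of 8, and the fp8 path prefers multiples of 16. For example, with
# pad_to_multiple_of=8, a longest-in-batch length of 61 is padded up to 64.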
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : Any = 2
# Initialize accelerator
lowercase__ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : List[Any] = config['''lr''']
lowercase__ : Union[str, Any] = int(config['''num_epochs'''] )
lowercase__ : List[str] = int(config['''seed'''] )
lowercase__ : Any = int(config['''batch_size'''] )
lowercase__ : int = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : str = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : Optional[int] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[str] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : int = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Dict = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Any = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : Union[str, Any] = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
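# Hedged usage sketch (the script name is hypothetical; the flags come from the
# argument parser above):
#
#   accelerate launch memory_example.py --mixed_precision fp16
#   python memory_example.py --cpu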
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
lowercase__ : Optional[int] = tmp_path / '''cache'''
lowercase__ : List[Any] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Tuple = TextDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_text_dataset(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
lowercase__ : str = tmp_path / '''cache'''
lowercase__ : Tuple = {'''text''': '''string'''}
lowercase__ : Optional[int] = features.copy() if features else default_expected_features
lowercase__ : List[Any] = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase__ : int = TextDatasetReader(__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_text_dataset(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Any = tmp_path / '''cache'''
lowercase__ : int = {'''text''': '''string'''}
lowercase__ : Tuple = TextDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , split=__lowerCamelCase ).read()
_check_text_dataset(__lowerCamelCase , __lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
if issubclass(__lowerCamelCase , __lowerCamelCase ):
lowercase__ : Dict = text_path
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
lowercase__ : Tuple = [text_path]
lowercase__ : List[str] = tmp_path / '''cache'''
lowercase__ : List[str] = {'''text''': '''string'''}
lowercase__ : List[Any] = TextDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_text_dataset(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=("train",) ) -> List[str]:
assert isinstance(__lowerCamelCase , __lowerCamelCase )
for split in splits:
lowercase__ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
lowercase__ : List[Any] = tmp_path / '''cache'''
lowercase__ : List[str] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Optional[int] = TextDatasetReader({'''train''': text_path} , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read()
_check_text_datasetdict(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
lowercase__ : List[str] = tmp_path / '''cache'''
    # text files are read as a single "text" column with string dtype by default
lowercase__ : str = {'''text''': '''string'''}
lowercase__ : Dict = features.copy() if features else default_expected_features
lowercase__ : Any = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase__ : Optional[Any] = TextDatasetReader({'''train''': text_path} , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_text_datasetdict(__lowerCamelCase , __lowerCamelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
if split:
lowercase__ : Any = {split: text_path}
else:
lowercase__ : Dict = '''train'''
lowercase__ : Optional[int] = {'''train''': text_path, '''test''': text_path}
lowercase__ : Any = tmp_path / '''cache'''
lowercase__ : Union[str, Any] = {'''text''': '''string'''}
lowercase__ : Optional[Any] = TextDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read()
_check_text_datasetdict(__lowerCamelCase , __lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
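# A minimal sketch of the reader these tests exercise (the path is hypothetical):
#
#   from datasets.io.text import TextDatasetReader
#   ds = TextDatasetReader("data/train.txt", cache_dir="cache").read()
#   assert ds.column_names == ["text"]  # one string column, one row per line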
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowercase__ : List[Any] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_snake_case )
        lowercase__ : int = AutoModelForSeq2SeqLM.from_pretrained(_snake_case )
lowercase__ : str = tokenizer('''This is me''' ,return_tensors='''pt''' )
lowercase__ : Tuple = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase__ : Optional[int] = model.generate(**_snake_case )
lowercase__ : List[Any] = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
            lowercase__ : Tuple = AutoModelForSeq2SeqLM.from_pretrained(_snake_case )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase__ : int = model_reloaded.generate(**_snake_case )
self.assertTrue(torch.allclose(_snake_case ,_snake_case ) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[str] = '''hf-internal-testing/tiny-random-t5'''
        lowercase__ : Optional[int] = AutoModelForSeq2SeqLM.from_pretrained(_snake_case )
lowercase__ : Union[str, Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_snake_case ):
model.save_pretrained(_snake_case )
lowercase__ : int = model.reverse_bettertransformer()
model.save_pretrained(_snake_case )
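# A hedged sketch of the round-trip the tests above exercise (the checkpoint is
# the same tiny test model; the API calls are standard transformers methods):
#
#   from transformers import AutoModelForSeq2SeqLM
#   model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#   model = model.to_bettertransformer()       # swap in fused attention modules
#   model = model.reverse_bettertransformer()  # restore the canonical weights
#   model.save_pretrained("./t5-checkpoint")   # only safe after reversing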
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> Any:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
lowercase__ : List[str] = os.path.abspath(__lowerCamelCase )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
lowercase__ : List[Any] = torch.load(__lowerCamelCase , map_location='''cpu''' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
lowercase__ : int = convert_pytorch_state_dict_to_flax(__lowerCamelCase , __lowerCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowercase__ : Dict = convert_pytorch_sharded_state_dict_to_flax(__lowerCamelCase , __lowerCamelCase )
return flax_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(__lowerCamelCase ) -> bool:
return len(set(__lowerCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowercase__ : int = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowercase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowercase__ : Any = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowercase__ : Tuple = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase__ : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
lowercase__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
lowercase__ : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase__ : Optional[int] = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowercase__ : List[str] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowercase__ : List[str] = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowercase__ : List[str] = pt_tuple_key[-2] + '''_v'''
if name is not None:
lowercase__ : Optional[Any] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
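# Worked example of the weight-norm branch above (the key is illustrative):
#   ("conv", "parametrizations", "weight", "original0") -> ("conv", "weight_g")
#   ("conv", "parametrizations", "weight", "original1") -> ("conv", "weight_v")
# pt_tuple_key[-3::2] selects ("parametrizations", "original0"), the new leaf is
# pt_tuple_key[-2] + "_g", and the last three key components are replaced by it.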
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
# convert pytorch tensor to numpy
lowercase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase__ : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowercase__ : str = flax_model.params['''params''']
else:
lowercase__ : Optional[int] = flax_model.params
lowercase__ : Optional[Any] = flatten_dict(__lowerCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowercase__ : Tuple = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__lowerCamelCase )
lowercase__ : int = {}
lowercase__ : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowercase__ : Union[str, Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ : Optional[Any] = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowercase__ : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase__ , lowercase__ : List[str] = rename_key_and_reshape_tensor(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# add model prefix if necessary
lowercase__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowercase__ : int = jnp.asarray(__lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCamelCase , __lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowercase__ : Tuple = jnp.asarray(__lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowercase__ : Any = jnp.asarray(__lowerCamelCase )
return unflatten_dict(__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
import torch
# Load the index
lowercase__ : Dict = {}
for shard_file in shard_filenames:
# load using msgpack utils
        lowercase__ : Optional[int] = torch.load(shard_file )
lowercase__ : str = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase__ : Dict = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowercase__ : Optional[Any] = flax_model.params['''params''']
lowercase__ : List[Any] = flatten_dict(__lowerCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
lowercase__ : Union[str, Any] = flax_model.params
lowercase__ : Tuple = flatten_dict(__lowerCamelCase )
lowercase__ : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowercase__ : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ : List[str] = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowercase__ : Tuple = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase__ , lowercase__ : str = rename_key_and_reshape_tensor(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# add model prefix if necessary
lowercase__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowercase__ : Union[str, Any] = jnp.asarray(__lowerCamelCase )
continue
if "var" in flax_key[-1]:
lowercase__ : str = jnp.asarray(__lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCamelCase , __lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowercase__ : List[str] = jnp.asarray(__lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowercase__ : Union[str, Any] = jnp.asarray(__lowerCamelCase )
return unflatten_dict(__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : List[str] = os.path.abspath(__lowerCamelCase )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
lowercase__ : Optional[int] = getattr(__lowerCamelCase , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__lowerCamelCase , '''rb''' ) as state_f:
try:
lowercase__ : str = from_bytes(__lowerCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
            '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    lowercase__ : Any = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , __lowerCamelCase ) ).values()
if any(__lowerCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
        lowercase__ : Union[str, Any] = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , __lowerCamelCase )
lowercase__ : Tuple = flatten_dict(__lowerCamelCase )
lowercase__ : List[str] = pt_model.state_dict()
lowercase__ : int = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
lowercase__ : int = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowercase__ : List[str] = []
lowercase__ : Tuple = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowercase__ : List[Any] = flax_key_tuple[0] == pt_model.base_model_prefix
lowercase__ : Optional[int] = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : Tuple = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCamelCase ) not in pt_model_dict:
# conv layer
lowercase__ : Dict = flax_key_tuple[:-1] + ('''weight''',)
lowercase__ : List[str] = jnp.transpose(__lowerCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ) not in pt_model_dict:
# linear layer
lowercase__ : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
lowercase__ : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowercase__ : Dict = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowercase__ : Any = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
lowercase__ : Dict = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
lowercase__ : Union[str, Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowercase__ : Dict = '''.'''.join(__lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowercase__ : Optional[int] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowercase__ : str = key.split('''.''' )
lowercase__ : Optional[Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowercase__ : List[str] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowercase__ : str = key_components[-2] + '''_v'''
if name is not None:
lowercase__ : Optional[int] = key_components[:-3] + [name]
lowercase__ : List[str] = '''.'''.join(__lowerCamelCase )
lowercase__ : List[Any] = key
if flax_key in special_pt_names:
lowercase__ : Any = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
lowercase__ : List[str] = np.asarray(__lowerCamelCase ) if not isinstance(__lowerCamelCase , np.ndarray ) else flax_tensor
lowercase__ : List[str] = torch.from_numpy(__lowerCamelCase )
# remove from missing keys
missing_keys.remove(__lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowerCamelCase )
pt_model.load_state_dict(__lowerCamelCase )
# re-transform missing_keys to list
lowercase__ : Optional[Any] = list(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(__lowerCamelCase ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
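# Hedged end-to-end sketch of the helpers in this file (in upstream transformers
# they are named load_pytorch_checkpoint_in_flax_state_dict,
# convert_pytorch_state_dict_to_flax, load_flax_checkpoint_in_pytorch_model, and
# load_flax_weights_in_pytorch_model; the checkpoints below are hypothetical):
#
#   from transformers import BertModel, FlaxBertModel
#   flax_model = FlaxBertModel.from_pretrained("bert-base-cased", from_pt=True)
#   pt_model = BertModel.from_pretrained("./flax-checkpoint", from_flax=True)
#
# `from_pt=True` routes through the PyTorch->Flax converters above, and
# `from_flax=True` through the Flax->PyTorch loader.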
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = "maskformer"
lowerCAmelCase : Any = {"hidden_size": "mask_feature_size"}
lowerCAmelCase : Optional[int] = ["resnet", "swin"]
lowerCAmelCase : str = ["detr"]
def __init__( self : int ,_snake_case : int = 256 ,_snake_case : int = 256 ,_snake_case : float = 0.1 ,_snake_case : bool = False ,_snake_case : Optional[Dict] = None ,_snake_case : Optional[Dict] = None ,_snake_case : float = 0.02 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 20.0 ,_snake_case : Optional[bool] = None ,**_snake_case : Optional[Any] ,) -> Dict:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase__ : Any = SwinConfig(
image_size=384 ,in_channels=3 ,patch_size=4 ,embed_dim=128 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ,)
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[str] = backbone_config.pop('''model_type''' )
lowercase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(_snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase__ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
lowercase__ : Tuple = (
decoder_config.pop('''model_type''' ) if isinstance(_snake_case ,_snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(_snake_case ,_snake_case ):
lowercase__ : Optional[int] = CONFIG_MAPPING[decoder_type]
lowercase__ : Optional[Any] = config_class.from_dict(_snake_case )
lowercase__ : List[Any] = backbone_config
lowercase__ : List[Any] = decoder_config
# main feature dimension for the model
lowercase__ : List[str] = fpn_feature_size
lowercase__ : int = mask_feature_size
# initializer
lowercase__ : str = init_std
lowercase__ : str = init_xavier_std
# Hungarian matcher && loss
lowercase__ : Optional[int] = cross_entropy_weight
lowercase__ : List[Any] = dice_weight
lowercase__ : List[str] = mask_weight
lowercase__ : str = use_auxiliary_loss
lowercase__ : Optional[int] = no_object_weight
lowercase__ : Optional[Any] = output_auxiliary_logits
lowercase__ : Optional[Any] = self.decoder_config.encoder_attention_heads
lowercase__ : Optional[Any] = self.decoder_config.num_hidden_layers
super().__init__(**_snake_case )
@classmethod
def UpperCAmelCase ( cls : Any ,_snake_case : PretrainedConfig ,_snake_case : PretrainedConfig ,**_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return cls(
backbone_config=_snake_case ,decoder_config=_snake_case ,**_snake_case ,)
def UpperCAmelCase ( self : str ) -> Dict[str, any]:
"""simple docstring"""
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : List[Any] = self.decoder_config.to_dict()
lowercase__ : List[str] = self.__class__.model_type
return output
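# Minimal instantiation sketch (a hedged example; upstream, the classmethod
# defined above is named from_backbone_and_decoder_configs):
#
#   config = MaskFormerConfig()  # falls back to the Swin backbone + DETR decoder
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(
#       SwinConfig(), DetrConfig()
#   )
#   config_dict = config.to_dict()  # nested backbone/decoder configs serialized too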
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class __A ( A_ ):
'''simple docstring'''
    def __init__( self : Any ,_snake_case : UNet1DModel ,_snake_case : UNet1DModel ,_snake_case : DDPMScheduler ,_snake_case : Any ,) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[int] = value_function
lowercase__ : Optional[int] = unet
lowercase__ : Tuple = scheduler
lowercase__ : Dict = env
lowercase__ : int = env.get_dataset()
lowercase__ : Dict = {}
for key in self.data.keys():
try:
lowercase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ : List[Any] = {}
for key in self.data.keys():
try:
lowercase__ : str = self.data[key].std()
except: # noqa: E722
pass
lowercase__ : Tuple = env.observation_space.shape[0]
lowercase__ : Optional[int] = env.action_space.shape[0]
def UpperCAmelCase ( self : str ,_snake_case : Any ,_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def UpperCAmelCase ( self : Dict ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
if type(_snake_case ) is dict:
return {k: self.to_torch(_snake_case ) for k, v in x_in.items()}
elif torch.is_tensor(_snake_case ):
return x_in.to(self.unet.device )
return torch.tensor(_snake_case ,device=self.unet.device )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Any ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
for key, val in cond.items():
lowercase__ : List[Any] = val.clone()
return x_in
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ,_snake_case : List[Any] ,_snake_case : int ,_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Any = x.shape[0]
lowercase__ : Dict = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ : Dict = torch.full((batch_size,) ,_snake_case ,device=self.unet.device ,dtype=torch.long )
for _ in range(_snake_case ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ : int = self.value_function(x.permute(0 ,2 ,1 ) ,_snake_case ).sample
lowercase__ : Optional[Any] = torch.autograd.grad([y.sum()] ,[x] )[0]
lowercase__ : List[str] = self.scheduler._get_variance(_snake_case )
lowercase__ : Union[str, Any] = torch.exp(0.5 * posterior_variance )
lowercase__ : Optional[int] = model_std * grad
lowercase__ : Optional[Any] = 0
lowercase__ : str = x.detach()
lowercase__ : Dict = x + scale * grad
lowercase__ : str = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : Union[str, Any] = self.unet(x.permute(0 ,2 ,1 ) ,_snake_case ).sample.permute(0 ,2 ,1 )
# TODO: verify deprecation of this kwarg
lowercase__ : Dict = self.scheduler.step(_snake_case ,_snake_case ,_snake_case ,predict_epsilon=_snake_case )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
lowercase__ : Dict = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : Union[str, Any] = self.to_torch(_snake_case )
return x, y
def __call__( self : Union[str, Any] ,_snake_case : Any ,_snake_case : Tuple=64 ,_snake_case : Any=32 ,_snake_case : Optional[Any]=2 ,_snake_case : str=0.1 ) -> List[Any]:
"""simple docstring"""
lowercase__ : Any = self.normalize(_snake_case ,'''observations''' )
lowercase__ : Tuple = obs[None].repeat(_snake_case ,axis=0 )
lowercase__ : Dict = {0: self.to_torch(_snake_case )}
lowercase__ : int = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ : Optional[int] = randn_tensor(_snake_case ,device=self.unet.device )
lowercase__ : Tuple = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : str = self.to_torch(_snake_case )
# run the diffusion process
lowercase__ , lowercase__ : int = self.run_diffusion(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# sort output trajectories by value
lowercase__ : Optional[Any] = y.argsort(0 ,descending=_snake_case ).squeeze()
lowercase__ : str = x[sorted_idx]
lowercase__ : str = sorted_values[:, :, : self.action_dim]
lowercase__ : Optional[int] = actions.detach().cpu().numpy()
lowercase__ : List[str] = self.de_normalize(_snake_case ,key='''actions''' )
# select the action with the highest value
if y is not None:
lowercase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ : str = np.random.randint(0 ,_snake_case )
lowercase__ : int = denorm_actions[selected_index, 0]
return denorm_actions
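# Hedged usage sketch for a value-guided planning pipeline like the class above
# (upstream this pipeline is called ValueGuidedRLPipeline; the environment and
# keyword names below are illustrative). __call__ denoises a batch of candidate
# trajectories, sorts them by predicted value, and returns the first action of
# the best one:
#
#   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#   obs = env.reset()
#   action = pipeline(obs, batch_size=64, planning_horizon=32,
#                     n_guide_steps=2, scale=0.1)
#   obs, reward, done, info = env.step(action)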
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
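# A hedged note on the lazy-import pattern used here (and in the other
# __init__ modules above): at runtime the module object is replaced by a
# _LazyModule that resolves names from _import_structure on first attribute
# access, so e.g.
#
#   from transformers import MraConfig  # loads only configuration_mra
#
# while the `if TYPE_CHECKING:` branch gives static type checkers eager imports.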
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
'''simple docstring'''
    def _info(self):
        """Metric metadata: features, citation and reference urls."""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/krishnap25/mauve''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/krishnap25/mauve'''] ,reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] ,)
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1_024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Delegate to the official `mauve-text` implementation with the given knobs."""
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
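

# --- Illustrative usage sketch (added) ---
# Besides raw strings, `compute_mauve` also accepts precomputed features, which
# avoids re-running the GPT-2 featurizer. A minimal sketch assuming the
# `mauve-text` package is installed; the random features are stand-ins only.
def _example_mauve_from_features():
    import numpy as np
    from mauve import compute_mauve

    rng = np.random.default_rng(0)
    p_features = rng.normal(size=(100, 768))  # placeholder for model-text embeddings
    q_features = rng.normal(size=(100, 768))  # placeholder for human-text embeddings
    out = compute_mauve(p_features=p_features, q_features=q_features, num_buckets=8, seed=25)
    print(out.mauve)  # scalar in (0, 1]; higher means the distributions are closer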
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
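

# --- Illustrative check (added) ---
# A quick sanity check of the renaming rules above on a toy state dict; the
# tensor shapes are placeholders and do not match any real RWKV checkpoint.
def _example_convert_state_dict():
    toy = {
        "emb.weight": torch.zeros(4, 2),
        "blocks.0.ln0.weight": torch.zeros(2),
        "blocks.3.att.time_mix_k": torch.zeros(2),
        "blocks.3.ffn.key.weight": torch.zeros(2, 2),
        "head.weight": torch.zeros(4, 2),
    }
    converted = convert_state_dict(toy)
    # e.g. 'blocks.3.att.time_mix_k' -> 'rwkv.blocks.3.attention.time_mix_key'
    print(sorted(converted.keys()))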
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 5_02_77
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        index_path = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_path, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCAmelCase_ = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
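

# --- Illustrative follow-up (added) ---
# Once the script has written `output_dir`, the converted checkpoint loads like
# any other Transformers model. A minimal sketch; "./rwkv-hf" is a placeholder
# path for wherever `--output_dir` pointed.
def _example_load_converted_model():
    model = AutoModelForCausalLM.from_pretrained("./rwkv-hf")
    tokenizer = AutoTokenizer.from_pretrained("./rwkv-hf")
    inputs = tokenizer("Hello", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(logits.shape)  # (batch, sequence_length, vocab_size)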
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    """Convert a base-10 integer to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # place the remainder at the next decimal digit position
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"""0o{int(octal)}"""
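

# --- Illustrative alternative (added) ---
# The digit-by-digit construction above goes through floating point
# (`math.pow`), which is fragile for very large inputs. An integer-only
# equivalent using divmod, checked against Python's built-in `oct`:
def decimal_to_octal_divmod(num: int) -> str:
    digits = ""
    while num > 0:
        num, remainder = divmod(num, 8)
        digits = str(remainder) + digits
    return "0o" + (digits or "0")


assert decimal_to_octal_divmod(216) == oct(216) == "0o330"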
def main() -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """Configuration class for the ViViT video transformer model."""

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
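

# --- Illustrative usage sketch (added) ---
# A minimal check of how the defaults translate into tubelet tokens; the
# arithmetic below follows directly from the configuration fields.
def _example_vivit_config():
    config = VivitConfig(image_size=224, num_frames=32, tubelet_size=[2, 16, 16])
    t, h, w = config.tubelet_size
    num_patches = (config.num_frames // t) * (config.image_size // h) * (config.image_size // w)
    print(num_patches)  # 16 * 14 * 14 = 3136 tubelet tokens per video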
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowerCAmelCase_ = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = 'UperNetConfig'
class UperNetConvModule(nn.Module):
    """A convolution block that bundles conv/norm/activation layers."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    """One pooling branch: adaptive average pool followed by a 1x1 conv block."""

    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM): pools at several scales and upsamples back."""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode='''bilinear''', align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
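

# --- Illustrative shape check (added) ---
# A small sketch of what the pyramid pooling module produces: one upsampled
# feature map per pool scale, each restored to the spatial size of the input.
def _example_pyramid_pooling_shapes():
    ppm = UperNetPyramidPoolingModule(pool_scales=(1, 2, 3, 6), in_channels=8, channels=4, align_corners=False)
    ppm.eval()  # BatchNorm needs eval mode for 1x1 spatial maps with batch size 1
    x = torch.randn(1, 8, 16, 16)
    with torch.no_grad():
        outs = ppm(x)
    print([tuple(o.shape) for o in outs])  # four tensors, each (1, 4, 16, 16)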
class UperNetHead(nn.Module):
    """Unified Perceptual Parsing decode head (PSP + FPN) producing per-pixel logits."""

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode='''bilinear''', align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode='''bilinear''', align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """Fully convolutional auxiliary head applied to an intermediate feature map."""

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R'\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
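

# --- Illustrative inference sketch (added) ---
# End-to-end semantic segmentation with a public checkpoint. The model id is
# the one referenced in the archive list above; the blank PIL image is a
# placeholder input.
def _example_upernet_inference():
    from PIL import Image
    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    image = Image.new("RGB", (512, 512))  # placeholder image
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (batch, num_labels, height, width)
    print(logits.argmax(dim=1).shape)  # per-pixel class ids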
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
lowerCAmelCase : Any = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.4722_2412, 0.4192_1633, 0.4471_7434, 0.4687_4192, 0.4258_8258, 0.4615_0726, 0.467_7534, 0.4558_3832, 0.4857_9055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_fidelity(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # these schedulers are not compatible with this pipeline, skip them
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
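

# --- Illustrative note (added) ---
# The loop above relies on diffusers schedulers being interchangeable through
# their shared config. The same swap in isolation, with an assumed pipeline
# object passed in by the caller:
def _example_scheduler_swap(pipe):
    from diffusers import DPMSolverMultistepScheduler

    # re-instantiate a different scheduler from the current scheduler's config
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    return pipe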
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1_581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1_517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1_570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1_584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1_793
    _globals["_SELFTESTDATA"]._serialized_start = 1_795
    _globals["_SELFTESTDATA"]._serialized_end = 1_916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1_864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1_905
    _globals["_MODELPROTO"]._serialized_start = 1_919
    _globals["_MODELPROTO"]._serialized_end = 2_429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2_208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2_418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2_323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2_407
# @@protoc_insertion_point(module_scope)
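

# --- Illustrative usage sketch (added) ---
# The generated message classes above can parse a serialized `.model` file
# produced by sentencepiece. A minimal sketch; the path is a placeholder.
def _example_read_sentencepiece_model(path="spiece.model"):
    m = ModelProto()  # message class injected into this module by the builder above
    with open(path, "rb") as f:
        m.ParseFromString(f.read())
    print(m.trainer_spec.vocab_size, len(m.pieces))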
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_pix2struct'] = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pix2struct'] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
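# A minimal sketch of the lazy-import pattern used above, independent of transformers:
# attribute access on the module object triggers the real import, so importing the
# package stays cheap until a symbol is actually needed. The module/attribute names
# below ("json", "dumps") are placeholders chosen only because they always exist.
if __name__ == "__main__":
    import importlib
    import types

    class LazyModule(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            # maps public attribute name -> submodule that defines it
            self._attr_to_module = {
                attr: module for module, attrs in import_structure.items() for attr in attrs
            }

        def __getattr__(self, attr):
            module = importlib.import_module(self._attr_to_module[attr])
            return getattr(module, attr)

    lazy = LazyModule("demo", {"json": ["dumps"]})
    print(lazy.dumps({"lazy": True}))  # the json module is imported only at this point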
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir) -> None:
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_filename(tmpdir) -> None:
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 2_55
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
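# Usage sketch of the FileLock API exercised by the tests above: `acquire` is a
# context manager, and a second lock object asking for the same lock file either
# waits or raises `Timeout` once `timeout` expires. The lock-file path is a placeholder.
if __name__ == "__main__":
    from datasets.utils.filelock import FileLock

    lock = FileLock("demo.lock")
    with lock.acquire(timeout=1):
        pass  # critical section: only one holder of demo.lock at a time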
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class __A ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class __A ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
"""simple docstring"""
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        """Insert `text` into the trie, marking the end of the word with END."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        """Return the suffixes of all stored words starting with `prefix`."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features) -> None:
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path) -> None:
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path) -> None:
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path) -> None:
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path) -> None:
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path) -> None:
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
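# The `sqlite_path` fixture used by the tests above is defined elsewhere (in conftest);
# a minimal sketch of what such a fixture could create is shown here. The table name
# ("dataset") and the col_1/col_2/col_3 layout with 4 rows are taken from the
# assertions in `_check_sql_dataset`; the exact row values are an assumption.
def _make_demo_sqlite(path):
    con = sqlite3.connect(path)
    con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
    con.executemany(
        "INSERT INTO dataset VALUES (?, ?, ?)",
        [("0", 0, 0.0), ("1", 1, 1.0), ("2", 2, 2.0), ("3", 3, 3.0)],
    )
    con.commit()
    con.close()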
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1_088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # squeeze-and-excitation: pool to 1x1, compute per-channel attention, rescale
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = R'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
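# Usage sketch for the classes above: build a small randomly initialized RegNet and
# run a dummy batch through the classification head. The tiny config values are
# arbitrary (chosen only to keep the example fast), not a released checkpoint.
if __name__ == "__main__":
    model = RegNetForImageClassification(
        RegNetConfig(num_labels=10, hidden_sizes=[8, 16], depths=[1, 1], embedding_size=8)
    )
    dummy = torch.randn(1, 3, 64, 64)
    print(model(dummy).logits.shape)  # -> torch.Size([1, 10])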
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False  # reconstructed; the middle flag assignment was lost in extraction
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
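# Sketch of the global-attention convention exercised above: LED/Longformer-style
# models take a 0/1 `global_attention_mask` alongside the usual attention mask, with
# 1 marking tokens that attend globally. The tester marks trailing tokens; a common
# alternative (shown here on made-up token ids) is to mark only the first token.
if __name__ == "__main__":
    demo_input_ids = tf.constant([[0, 8, 9, 10, 2]])
    demo_global_mask = tf.concat(
        [tf.ones_like(demo_input_ids)[:, :1], tf.zeros_like(demo_input_ids)[:, 1:]], axis=-1
    )
    print(demo_global_mask.numpy())  # [[1 0 0 0 0]]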
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
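# Worked example for the relation used above, sigma = n * mu * q: with an electron
# concentration of 1e19 / m^3 and a mobility of 0.01 m^2/(V*s), the missing
# conductivity is 1e19 * 0.01 * 1.6021e-19 ~= 0.016 S/m. The numbers are illustrative only.
if __name__ == "__main__":
    print(carrier_concentration(conductivity=0, electron_conc=1e19, mobility=0.01))
    # -> ('conductivity', 0.016021)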
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access key `x`: move it to the front, evicting the LRU entry when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
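# Comparison sketch: the standard library's OrderedDict gives the same eviction
# behaviour with O(1) move-to-front, whereas the deque-based `refer` above pays O(n)
# for `remove`. This is a minimal illustration, not a drop-in replacement.
if __name__ == "__main__":
    from collections import OrderedDict

    cache: OrderedDict = OrderedDict()
    capacity = 4
    for key in ["A", 2, 3, "A", 4, 5]:
        if key in cache:
            cache.move_to_end(key, last=False)  # most recent at the front
        else:
            if len(cache) == capacity:
                cache.popitem(last=True)  # evict least recently used
            cache[key] = True
            cache.move_to_end(key, last=False)
    print(list(cache))  # [5, 4, 'A', 3] -- matches the assertion above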
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class __A(BaseImageProcessor):
    # NOTE: the original class name was lost in extraction; the implementation below
    # follows the standard BaseImageProcessor template used throughout transformers.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
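# Minimal sketch of the numeric pipeline `preprocess` applies per image
# (resize -> center crop -> rescale -> normalize), using only numpy on a fake
# channels-last image. Shapes and constants are illustrative; the real methods also
# handle PIL inputs, channel-order conversion and batching.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    fake = rng.integers(0, 256, size=(224, 224, 3)).astype(np.float32)
    rescaled = fake * (1 / 255)
    normalized = (rescaled - np.array(IMAGENET_DEFAULT_MEAN)) / np.array(IMAGENET_DEFAULT_STD)
    print(normalized.shape, float(normalized.mean()))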
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
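# Equivalent vectorized formulation of the per-pixel loop in `process`: build the
# source index grids once and use numpy fancy indexing. A sketch for comparison only;
# it assumes the same truncating `int(ratio * index)` mapping as `get_x`/`get_y`.
def nearest_neighbour_vectorized(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    # outer indexing picks every (y, x) pair of the destination grid
    return img[ys[:, None], xs[None, :]]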
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
        'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientNetForImageClassification',
        'EfficientNetModel',
        'EfficientNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
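# The update computed inside `run_gradient_descent` is batch gradient descent on the
# squared-error cost: theta_i <- theta_i - alpha * (1/m) * sum_j error(j) * x_i(j),
# with x_0(j) = 1 for the bias term (hence the `index == -1` branch above). A tiny
# standalone check of one update step on made-up numbers:
if __name__ == "__main__":
    errors = [0.5, -1.0, 0.25]  # hypothetical per-example errors
    xs = [1.0, 2.0, 3.0]        # hypothetical values of one feature
    grad = sum(e * x for e, x in zip(errors, xs)) / len(errors)
    print(0.009 * grad)         # the amount subtracted from that parameter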
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
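# Example invocation (the script and file names are illustrative; fire exposes the
# converted function's positional and keyword arguments directly on the command line):
#
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# Omitting --save_path overwrites the source checkpoint in place, per the branch above.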
| 302
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Tuple = "altclip_text_model"
def __init__( self : Dict ,_snake_case : Dict=250_002 ,_snake_case : Union[str, Any]=1_024 ,_snake_case : Optional[Any]=24 ,_snake_case : Tuple=16 ,_snake_case : Optional[int]=4_096 ,_snake_case : List[Any]="gelu" ,_snake_case : Any=0.1 ,_snake_case : Optional[Any]=0.1 ,_snake_case : Dict=514 ,_snake_case : Optional[Any]=1 ,_snake_case : List[Any]=0.02 ,_snake_case : Optional[int]=0.02 ,_snake_case : Any=1e-05 ,_snake_case : str=1 ,_snake_case : Dict=0 ,_snake_case : List[Any]=2 ,_snake_case : List[Any]="absolute" ,_snake_case : str=True ,_snake_case : int=768 ,**_snake_case : Dict ,) -> int:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,bos_token_id=_snake_case ,eos_token_id=_snake_case ,**_snake_case )
lowercase__ : Optional[int] = vocab_size
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Optional[int] = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Dict = intermediate_size
lowercase__ : str = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : Dict = type_vocab_size
lowercase__ : List[str] = initializer_range
lowercase__ : Optional[Any] = initializer_factor
lowercase__ : List[Any] = layer_norm_eps
lowercase__ : List[str] = position_embedding_type
lowercase__ : int = use_cache
lowercase__ : Union[str, Any] = project_dim
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = "altclip_vision_model"
def __init__( self : Any ,_snake_case : Union[str, Any]=768 ,_snake_case : List[Any]=3_072 ,_snake_case : Union[str, Any]=512 ,_snake_case : List[Any]=12 ,_snake_case : Tuple=12 ,_snake_case : Union[str, Any]=3 ,_snake_case : Any=224 ,_snake_case : int=32 ,_snake_case : int="quick_gelu" ,_snake_case : Union[str, Any]=1e-5 ,_snake_case : Tuple=0.0 ,_snake_case : Optional[Any]=0.02 ,_snake_case : Optional[Any]=1.0 ,**_snake_case : Dict ,) -> Optional[int]:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : List[Any] = hidden_size
lowercase__ : Dict = intermediate_size
lowercase__ : str = projection_dim
lowercase__ : Dict = num_hidden_layers
lowercase__ : List[Any] = num_attention_heads
lowercase__ : List[Any] = num_channels
lowercase__ : Optional[Any] = patch_size
lowercase__ : str = image_size
lowercase__ : int = initializer_range
lowercase__ : Optional[int] = initializer_factor
lowercase__ : List[Any] = attention_dropout
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : Optional[Any] = hidden_act
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] ,_snake_case : Union[str, os.PathLike] ,**_snake_case : Optional[int] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_snake_case )
lowercase__ , lowercase__ : Optional[Any] = cls.get_config_dict(_snake_case ,**_snake_case )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
lowercase__ : Any = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_snake_case ,**_snake_case )
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = "altclip"
lowerCAmelCase : str = True
def __init__( self : Any ,_snake_case : int=None ,_snake_case : Optional[Any]=None ,_snake_case : Optional[Any]=768 ,_snake_case : Optional[Any]=2.6592 ,**_snake_case : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = kwargs.pop('''text_config_dict''' ,_snake_case )
lowercase__ : Tuple = kwargs.pop('''vision_config_dict''' ,_snake_case )
super().__init__(**_snake_case )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
lowercase__ : Any = {}
# This is the complete result when using `text_config_dict`.
lowercase__ : Any = AltCLIPTextConfig(**_snake_case ).to_dict()
# Give a warning if a key exists in both `_text_config_dict` and `text_config` but with different values.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
lowercase__ : Union[str, Any] = (
f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
f"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
lowercase__ : Union[str, Any] = (
f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
f"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(_snake_case )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
lowercase__ : int = {}
# This is the complete result when using `vision_config_dict`.
lowercase__ : Dict = AltCLIPVisionConfig(**_snake_case ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
lowercase__ : Union[str, Any] = {
str(_snake_case ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if a key exists in both `_vision_config_dict` and `vision_config` but with different values.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
lowercase__ : Optional[int] = (
f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
lowercase__ : Union[str, Any] = (
f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
f"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(_snake_case )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
lowercase__ : Any = {}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
lowercase__ : Union[str, Any] = {}
logger.info('''`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.''' )
lowercase__ : str = AltCLIPTextConfig(**_snake_case )
lowercase__ : int = AltCLIPVisionConfig(**_snake_case )
lowercase__ : Any = projection_dim
lowercase__ : Tuple = logit_scale_init_value
lowercase__ : Optional[Any] = 1.0
@classmethod
def UpperCAmelCase ( cls : List[str] ,_snake_case : AltCLIPTextConfig ,_snake_case : AltCLIPVisionConfig ,**_snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**_snake_case )
def UpperCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
lowercase__ : Dict = copy.deepcopy(self.__dict__ )
lowercase__ : Any = self.text_config.to_dict()
lowercase__ : Optional[Any] = self.vision_config.to_dict()
lowercase__ : str = self.__class__.model_type
return output
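# Minimal usage sketch for the three config classes above, assuming they are exported
# under their public transformers names (AltCLIPTextConfig, AltCLIPVisionConfig,
# AltCLIPConfig); the obfuscated names in this file differ.
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_config = AltCLIPTextConfig(hidden_size=1_024, project_dim=768)
vision_config = AltCLIPVisionConfig(hidden_size=768, projection_dim=512)
composed = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
assert composed.to_dict()["text_config"]["hidden_size"] == 1_024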
| 302
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : UNetaDModel
lowerCAmelCase : ScoreSdeVeScheduler
def __init__( self : Optional[Any] ,_snake_case : UNetaDModel ,_snake_case : ScoreSdeVeScheduler ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_snake_case ,scheduler=_snake_case )
@torch.no_grad()
def __call__( self : Any ,_snake_case : int = 1 ,_snake_case : int = 2_000 ,_snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_snake_case : Optional[str] = "pil" ,_snake_case : bool = True ,**_snake_case : Any ,) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.unet.config.sample_size
lowercase__ : Dict = (batch_size, 3, img_size, img_size)
lowercase__ : Tuple = self.unet
lowercase__ : Any = randn_tensor(_snake_case ,generator=_snake_case ) * self.scheduler.init_noise_sigma
lowercase__ : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(_snake_case )
self.scheduler.set_sigmas(_snake_case )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[str] = self.unet(_snake_case ,_snake_case ).sample
lowercase__ : Optional[Any] = self.scheduler.step_correct(_snake_case ,_snake_case ,generator=_snake_case ).prev_sample
# prediction step
lowercase__ : str = model(_snake_case ,_snake_case ).sample
lowercase__ : List[Any] = self.scheduler.step_pred(_snake_case ,_snake_case ,_snake_case ,generator=_snake_case )
lowercase__ , lowercase__ : Optional[int] = output.prev_sample, output.prev_sample_mean
lowercase__ : Union[str, Any] = sample_mean.clamp(0 ,1 )
lowercase__ : int = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowercase__ : Any = self.numpy_to_pil(_snake_case )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_snake_case )
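# Usage sketch for the pipeline above (its public diffusers name is ScoreSdeVePipeline;
# the model id is illustrative). Each denoising step runs `correct_steps` Langevin
# corrector updates before one predictor update, matching the loop above:
#
#     from diffusers import ScoreSdeVePipeline
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=2_000).images[0]
#     image.save("sde_ve_sample.png")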
| 302
| 1
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = False
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
lowerCAmelCase_ = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
lowerCAmelCase_ = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
lowerCAmelCase_ = reader.read()
lowerCAmelCase_ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
lowerCAmelCase_ = UNetaDModel(**config)
else:
lowerCAmelCase_ = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
lowerCAmelCase_ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCAmelCase_ = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCAmelCase_ = config[key]
del config[key]
lowerCAmelCase_ = [k.replace('UNetRes', '') for k in config['down_block_types']]
lowerCAmelCase_ = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
lowerCAmelCase_ = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
lowerCAmelCase_ = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
lowerCAmelCase_ = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
lowerCAmelCase_ = param_value
lowerCAmelCase_ = True
if not has_changed:
lowerCAmelCase_ = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
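# The key renaming performed above reduces to this dictionary transform (a standalone
# sketch; names are illustrative):
def rename_keys(config, mapping):
    """Return a copy of `config` with keys renamed per `mapping`; other keys pass through."""
    return {mapping.get(key, key): value for key, value in config.items()}

assert rename_keys({"image_size": 64, "act_fn": "silu"}, {"image_size": "sample_size"}) == {
    "sample_size": 64,
    "act_fn": "silu",
}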
| 302
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = "maskformer"
lowerCAmelCase : Any = {"hidden_size": "mask_feature_size"}
lowerCAmelCase : Optional[int] = ["resnet", "swin"]
lowerCAmelCase : str = ["detr"]
def __init__( self : int ,_snake_case : int = 256 ,_snake_case : int = 256 ,_snake_case : float = 0.1 ,_snake_case : bool = False ,_snake_case : Optional[Dict] = None ,_snake_case : Optional[Dict] = None ,_snake_case : float = 0.02 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 20.0 ,_snake_case : Optional[bool] = None ,**_snake_case : Optional[Any] ,) -> Dict:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase__ : Any = SwinConfig(
image_size=384 ,in_channels=3 ,patch_size=4 ,embed_dim=128 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ,)
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[str] = backbone_config.pop('''model_type''' )
lowercase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(_snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase__ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
lowercase__ : Tuple = (
decoder_config.pop('''model_type''' ) if isinstance(_snake_case ,_snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(_snake_case ,_snake_case ):
lowercase__ : Optional[int] = CONFIG_MAPPING[decoder_type]
lowercase__ : Optional[Any] = config_class.from_dict(_snake_case )
lowercase__ : List[Any] = backbone_config
lowercase__ : List[Any] = decoder_config
# main feature dimension for the model
lowercase__ : List[str] = fpn_feature_size
lowercase__ : int = mask_feature_size
# initializer
lowercase__ : str = init_std
lowercase__ : str = init_xavier_std
# Hungarian matcher && loss
lowercase__ : Optional[int] = cross_entropy_weight
lowercase__ : List[Any] = dice_weight
lowercase__ : List[str] = mask_weight
lowercase__ : str = use_auxiliary_loss
lowercase__ : Optional[int] = no_object_weight
lowercase__ : Optional[Any] = output_auxiliary_logits
lowercase__ : Optional[Any] = self.decoder_config.encoder_attention_heads
lowercase__ : Optional[Any] = self.decoder_config.num_hidden_layers
super().__init__(**_snake_case )
@classmethod
def UpperCAmelCase ( cls : Any ,_snake_case : PretrainedConfig ,_snake_case : PretrainedConfig ,**_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return cls(
backbone_config=_snake_case ,decoder_config=_snake_case ,**_snake_case ,)
def UpperCAmelCase ( self : str ) -> Dict[str, any]:
"""simple docstring"""
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : List[Any] = self.decoder_config.to_dict()
lowercase__ : List[str] = self.__class__.model_type
return output
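# Usage sketch (public names assumed to be MaskFormerConfig, SwinConfig and DetrConfig,
# as in transformers; the obfuscated names in this file differ):
#
#     from transformers import DetrConfig, MaskFormerConfig, SwinConfig
#
#     composed = MaskFormerConfig.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"]),
#         decoder_config=DetrConfig(),
#     )
#     assert composed.backbone_config.model_type == "swin"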
| 302
| 1
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger('transformers.models.speecht5')
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
hf_model.apply_weight_norm()
lowercase__ : Optional[int] = checkpoint['''input_conv.weight_g''']
lowercase__ : Optional[int] = checkpoint['''input_conv.weight_v''']
lowercase__ : str = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
lowercase__ : Tuple = checkpoint[f"""upsamples.{i}.1.weight_g"""]
lowercase__ : Optional[Any] = checkpoint[f"""upsamples.{i}.1.weight_v"""]
lowercase__ : Optional[Any] = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowercase__ : Dict = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
lowercase__ : Dict = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
lowercase__ : Any = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
lowercase__ : Dict = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
lowercase__ : Optional[Any] = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
lowercase__ : str = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
lowercase__ : int = checkpoint['''output_conv.1.weight_g''']
lowercase__ : int = checkpoint['''output_conv.1.weight_v''']
lowercase__ : int = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> str:
if config_path is not None:
lowercase__ : Tuple = SpeechTaHifiGanConfig.from_pretrained(__lowerCamelCase )
else:
lowercase__ : Union[str, Any] = SpeechTaHifiGanConfig()
lowercase__ : Dict = SpeechTaHifiGan(__lowerCamelCase )
lowercase__ : str = torch.load(__lowerCamelCase )
load_weights(orig_checkpoint['''model''']['''generator'''] , __lowerCamelCase , __lowerCamelCase )
lowercase__ : Tuple = np.load(__lowerCamelCase )
lowercase__ : Dict = stats[0].reshape(-1 )
lowercase__ : str = stats[1].reshape(-1 )
lowercase__ : Union[str, Any] = torch.from_numpy(__lowerCamelCase ).float()
lowercase__ : Optional[Any] = torch.from_numpy(__lowerCamelCase ).float()
model.save_pretrained(__lowerCamelCase )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(__lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
lowerCAmelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
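# Example invocation (paths and repo id are illustrative):
#
#     python convert_hifigan.py \
#         --checkpoint_path generator.ckpt \
#         --stats_path stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan \
#         --push_to_hub username/speecht5-hifigan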
| 302
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
lowercase__ : int = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowercase__ : Dict = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ : Optional[int] = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ : List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : Dict = [3, 3, 3, 3]
lowercase__ : str = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : List[str] = [4, 4, 4, 4]
lowercase__ : Any = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : List[str] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : List[str] = [3, 3, 3, 3]
else:
lowercase__ : Optional[Any] = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[int] = 96
elif "small" in model_name:
lowercase__ : Union[str, Any] = 96
elif "base" in model_name:
lowercase__ : Tuple = 1_28
elif "large" in model_name:
lowercase__ : Any = 1_92
elif "xlarge" in model_name:
lowercase__ : Any = 2_56
elif "huge" in model_name:
lowercase__ : Union[str, Any] = 3_52
# set label information
lowercase__ : List[Any] = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowercase__ : Optional[int] = '''imagenet-22k-id2label.json'''
else:
lowercase__ : Optional[Any] = '''imagenet-1k-id2label.json'''
lowercase__ : Dict = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
lowercase__ : int = FocalNetConfig(
embed_dim=__lowerCamelCase , depths=__lowerCamelCase , focal_levels=__lowerCamelCase , focal_windows=__lowerCamelCase , use_conv_embed=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , use_post_layernorm=__lowerCamelCase , use_layerscale=__lowerCamelCase , )
return config
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
if "patch_embed.proj" in name:
lowercase__ : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase__ : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowercase__ : Dict = '''encoder.''' + name
if "encoder.layers" in name:
lowercase__ : Tuple = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowercase__ : Union[str, Any] = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowercase__ : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Dict = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Dict = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowercase__ : Dict = '''layernorm.weight'''
if name == "norm.bias":
lowercase__ : Dict = '''layernorm.bias'''
if "head" in name:
lowercase__ : Dict = name.replace('''head''' , '''classifier''' )
else:
lowercase__ : List[Any] = '''focalnet.''' + name
return name
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> List[str]:
# fmt: off
lowercase__ : Any = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowercase__ : Optional[int] = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , __lowerCamelCase )
lowercase__ : str = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowercase__ : int = state_dict.pop(__lowerCamelCase )
lowercase__ : Any = val
lowercase__ : List[Any] = get_focalnet_config(__lowerCamelCase )
lowercase__ : Optional[int] = FocalNetForImageClassification(__lowerCamelCase )
model.eval()
# load state dict
model.load_state_dict(__lowerCamelCase )
# verify conversion
lowercase__ : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ : int = BitImageProcessor(
do_resize=__lowerCamelCase , size={'''shortest_edge''': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase , crop_size=2_24 , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , )
lowercase__ : str = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
lowercase__ : List[str] = processor(images=__lowerCamelCase , return_tensors='''pt''' )
lowercase__ : List[str] = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowercase__ : Optional[Any] = image_transforms(__lowerCamelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , __lowerCamelCase , atol=1E-4 )
lowercase__ : Optional[Any] = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Union[str, Any] = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowercase__ : Optional[int] = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Dict = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowercase__ : List[str] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowercase__ : List[str] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCAmelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
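# Example invocation (dump folder is illustrative; note --push_to_hub is a flag here,
# unlike the string-valued variant in some other conversion scripts):
#
#     python convert_focalnet.py --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub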
| 302
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
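# What the _LazyModule registration above buys: importing the package stays cheap, and
# the heavy torch/TF submodules are only imported when a listed attribute is first
# accessed. A minimal stand-in with the same shape (not transformers' actual class):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)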
| 302
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = ["image_processor", "tokenizer"]
lowerCAmelCase : int = "ChineseCLIPImageProcessor"
lowerCAmelCase : str = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Tuple ,_snake_case : str=None ,_snake_case : Union[str, Any]=None ,**_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,_snake_case ,)
lowercase__ : Tuple = kwargs.pop('''feature_extractor''' )
lowercase__ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case ,_snake_case )
lowercase__ : List[Any] = self.image_processor
def __call__( self : List[Any] ,_snake_case : Optional[int]=None ,_snake_case : Dict=None ,_snake_case : List[Any]=None ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be None.''' )
if text is not None:
lowercase__ : str = self.tokenizer(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if images is not None:
lowercase__ : str = self.image_processor(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if text is not None and images is not None:
lowercase__ : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) ,tensor_type=_snake_case )
def UpperCAmelCase ( self : Any ,*_snake_case : List[Any] ,**_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ,*_snake_case : Tuple ,**_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@property
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.tokenizer.model_input_names
lowercase__ : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,_snake_case ,)
return self.image_processor_class
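# Usage sketch (public name assumed to be ChineseCLIPProcessor, as in transformers;
# `image` stands for any PIL image you supply):
#
#     from transformers import ChineseCLIPProcessor
#
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
#     # -> tokenizer outputs (input_ids, ...) plus pixel_values, merged as in __call__ above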
| 302
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class __A :
'''simple docstring'''
def __init__( self : str ,_snake_case : int ) -> None:
"""simple docstring"""
lowercase__ : Union[str, Any] = value
lowercase__ : Node | None = None
lowercase__ : Node | None = None
class __A :
'''simple docstring'''
def __init__( self : List[Any] ,_snake_case : Node ) -> None:
"""simple docstring"""
lowercase__ : List[str] = tree
def UpperCAmelCase ( self : Any ,_snake_case : Node | None ) -> int:
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Optional[Any] ) -> Iterator[int]:
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
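# Usage sketch for the classes above (public names assumed to be Node and
# BinaryTreeNodeSum, matching the TheAlgorithms original this mirrors). The
# depth_first_search method returns the sum of all values in a subtree, and
# __iter__ yields that single total:
#
#     tree = Node(10)
#     tree.left, tree.right = Node(5), Node(-3)
#     assert list(BinaryTreeNodeSum(tree)) == [12]  # 10 + 5 + (-3)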
| 302
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 302
| 1
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __UpperCAmelCase ( __lowerCamelCase ) -> Union[str, Any]:
lowercase__ : Any = image.size
lowercase__ : Optional[int] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
lowercase__ : int = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
lowercase__ : Dict = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_55.0
lowercase__ : Tuple = image[None].transpose(0 , 3 , 1 , 2 )
lowercase__ : int = torch.from_numpy(_UpperCAmelCase )
return 2.0 * image - 1.0
class __A ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : VQModel ,_snake_case : UNetaDModel ,_snake_case : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] ,) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=__a ,unet=__a ,scheduler=__a )
@torch.no_grad()
def __call__( self : Tuple ,_snake_case : Union[torch.Tensor, PIL.Image.Image] = None ,_snake_case : Optional[int] = 1 ,_snake_case : Optional[int] = 100 ,_snake_case : Optional[float] = 0.0 ,_snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_snake_case : Optional[str] = "pil" ,_snake_case : bool = True ,) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
if isinstance(__a ,PIL.Image.Image ):
lowercase__ : Union[str, Any] = 1
elif isinstance(__a ,torch.Tensor ):
lowercase__ : str = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__a )}""" )
if isinstance(__a ,PIL.Image.Image ):
lowercase__ : Tuple = preprocess(__a )
lowercase__ : Any = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowercase__ : Dict = (batch_size, self.unet.config.in_channels // 2, height, width)
lowercase__ : Optional[int] = next(self.unet.parameters() ).dtype
lowercase__ : int = randn_tensor(__a ,generator=__a ,device=self.device ,dtype=__a )
lowercase__ : Union[str, Any] = image.to(device=self.device ,dtype=__a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__a ,device=self.device )
lowercase__ : List[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ : str = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ : str = {}
if accepts_eta:
lowercase__ : Optional[Any] = eta
for t in self.progress_bar(__a ):
# concat latents and low resolution image in the channel dimension.
lowercase__ : int = torch.cat([latents, image] ,dim=1 )
lowercase__ : Optional[int] = self.scheduler.scale_model_input(__a ,__a )
# predict the noise residual
lowercase__ : List[str] = self.unet(__a ,__a ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : List[Any] = self.scheduler.step(__a ,__a ,__a ,**__a ).prev_sample
# decode the image latents with the VQVAE
lowercase__ : Any = self.vqvae.decode(__a ).sample
lowercase__ : List[Any] = torch.clamp(__a ,-1.0 ,1.0 )
lowercase__ : Union[str, Any] = image / 2 + 0.5
lowercase__ : str = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowercase__ : str = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
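# Usage sketch (the public diffusers class is LDMSuperResolutionPipeline; the model id
# and image path are illustrative):
#
#     from diffusers import LDMSuperResolutionPipeline
#     from PIL import Image
#
#     pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res = Image.open("low_res.png").convert("RGB")
#     upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]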
| 350
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default="NER" ,metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase : bool = field(default=A_ ,metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,)
lowerCAmelCase : int = field(
default=1_2_8 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
lowercase__ : str = import_module('''tasks''' )
try:
lowercase__ : List[str] = getattr(__lowerCamelCase , model_args.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowercase__ : Union[str, Any] = token_classification_task.get_labels(data_args.labels )
lowercase__ : Dict[int, str] = dict(enumerate(__lowerCamelCase ) )
lowercase__ : Optional[int] = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid={label: i for i, label in enumerate(__lowerCamelCase )} , cache_dir=model_args.cache_dir , )
lowercase__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowercase__ : str = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__lowerCamelCase , __lowerCamelCase ) -> Tuple[List[int], List[int]]:
lowercase__ : Tuple = np.argmax(__lowerCamelCase , axis=2 )
lowercase__ , lowercase__ : Tuple = preds.shape
lowercase__ : List[str] = [[] for _ in range(__lowerCamelCase )]
lowercase__ : Tuple = [[] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__lowerCamelCase ) -> Dict:
lowercase__ , lowercase__ : List[Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__lowerCamelCase , __lowerCamelCase ),
"precision": precision_score(__lowerCamelCase , __lowerCamelCase ),
"recall": recall_score(__lowerCamelCase , __lowerCamelCase ),
"f1": fa_score(__lowerCamelCase , __lowerCamelCase ),
}
# Data collator
lowercase__ : Tuple = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowercase__ : str = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , compute_metrics=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ : int = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ : Optional[int] = trainer.evaluate()
lowercase__ : Union[str, Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__lowerCamelCase )
# Predict
if training_args.do_predict:
lowercase__ : Optional[int] = TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = trainer.predict(__lowerCamelCase )
lowercase__ , lowercase__ : Tuple = align_predictions(__lowerCamelCase , __lowerCamelCase )
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return results
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
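# Example invocation (file layout is illustrative; the data dir must hold
# CoNLL-2003-formatted .txt files, per DataTrainingArguments above):
#
#     python run_ner.py --model_name_or_path bert-base-cased --task_type NER \
#         --data_dir ./conll2003 --labels ./conll2003/labels.txt \
#         --output_dir ./ner_out --max_seq_length 128 --do_train --do_eval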
| 302
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Tuple = 1
lowercase__ : List[Any] = 3
lowercase__ : List[Any] = (32, 32)
lowercase__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(__A )
return image
@property
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : Dict = UNetaDConditionModel(
block_out_channels=(32, 32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=7 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,attention_head_dim=8 ,use_linear_projection=__A ,only_cross_attention=(True, True, False) ,num_class_embeds=100 ,)
return model
@property
def UpperCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : int = AutoencoderKL(
block_out_channels=[32, 32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
return model
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act='''gelu''' ,projection_dim=512 ,)
return CLIPTextModel(__A )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ : List[str] = self.dummy_cond_unet_upscale
lowercase__ : List[str] = DDPMScheduler()
lowercase__ : Tuple = DDIMScheduler(prediction_type='''v_prediction''' )
lowercase__ : Any = self.dummy_vae
lowercase__ : Any = self.dummy_text_encoder
lowercase__ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : Optional[Any] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
lowercase__ : Tuple = Image.fromarray(np.uinta(__A ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowercase__ : Optional[Any] = StableDiffusionUpscalePipeline(
unet=__A ,low_res_scheduler=__A ,scheduler=__A ,vae=__A ,text_encoder=__A ,tokenizer=__A ,max_noise_level=350 ,)
lowercase__ : List[Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
lowercase__ : List[str] = """A painting of a squirrel eating a burger"""
lowercase__ : Tuple = torch.Generator(device=__A ).manual_seed(0 )
lowercase__ : Any = sd_pipe(
[prompt] ,image=__A ,generator=__A ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='''np''' ,)
lowercase__ : int = output.images
lowercase__ : Optional[Any] = torch.Generator(device=__A ).manual_seed(0 )
lowercase__ : List[Any] = sd_pipe(
[prompt] ,image=__A ,generator=__A ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='''np''' ,return_dict=__A ,)[0]
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
lowercase__ : str = image_from_tuple[0, -3:, -3:, -1]
lowercase__ : Union[str, Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowercase__ : Tuple = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ : Any = self.dummy_cond_unet_upscale
lowercase__ : str = DDPMScheduler()
lowercase__ : Dict = DDIMScheduler(prediction_type='''v_prediction''' )
lowercase__ : Dict = self.dummy_vae
lowercase__ : str = self.dummy_text_encoder
lowercase__ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : List[Any] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
lowercase__ : Optional[int] = Image.fromarray(np.uinta(__A ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowercase__ : List[str] = StableDiffusionUpscalePipeline(
unet=__A ,low_res_scheduler=__A ,scheduler=__A ,vae=__A ,text_encoder=__A ,tokenizer=__A ,max_noise_level=350 ,)
lowercase__ : Any = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
lowercase__ : Optional[int] = """A painting of a squirrel eating a burger"""
lowercase__ : Union[str, Any] = sd_pipe(
2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='''np''' ,)
lowercase__ : Any = output.images
assert image.shape[0] == 2
lowercase__ : int = torch.Generator(device=__A ).manual_seed(0 )
lowercase__ : Dict = sd_pipe(
[prompt] ,image=__A ,generator=__A ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='''np''' ,)
lowercase__ : List[str] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''' )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.dummy_cond_unet_upscale
lowercase__ : str = DDPMScheduler()
lowercase__ : str = DDIMScheduler(prediction_type='''v_prediction''' )
lowercase__ : int = self.dummy_vae
lowercase__ : int = self.dummy_text_encoder
lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : Dict = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
lowercase__ : str = Image.fromarray(np.uinta(__A ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowercase__ : Optional[Any] = unet.half()
lowercase__ : Optional[int] = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowercase__ : Optional[int] = StableDiffusionUpscalePipeline(
unet=__A ,low_res_scheduler=__A ,scheduler=__A ,vae=__A ,text_encoder=__A ,tokenizer=__A ,max_noise_level=350 ,)
lowercase__ : List[Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
lowercase__ : Dict = """A painting of a squirrel eating a burger"""
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : List[Any] = sd_pipe(
[prompt] ,image=__A ,generator=__A ,num_inference_steps=2 ,output_type='''np''' ,).images
lowercase__ : Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
lowercase__ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowercase__ : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
lowercase__ : Optional[Any] = """stabilityai/stable-diffusion-x4-upscaler"""
lowercase__ : List[Any] = StableDiffusionUpscalePipeline.from_pretrained(__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
lowercase__ : List[str] = """a cat sitting on a park bench"""
lowercase__ : Tuple = torch.manual_seed(0 )
lowercase__ : List[Any] = pipe(
prompt=__A ,image=__A ,generator=__A ,output_type='''np''' ,)
lowercase__ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowercase__ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
lowercase__ : List[str] = """stabilityai/stable-diffusion-x4-upscaler"""
lowercase__ : int = StableDiffusionUpscalePipeline.from_pretrained(
__A ,torch_dtype=torch.floataa ,)
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
lowercase__ : List[str] = """a cat sitting on a park bench"""
lowercase__ : Union[str, Any] = torch.manual_seed(0 )
lowercase__ : Optional[Any] = pipe(
prompt=__A ,image=__A ,generator=__A ,output_type='''np''' ,)
lowercase__ : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowercase__ : Dict = """stabilityai/stable-diffusion-x4-upscaler"""
lowercase__ : List[str] = StableDiffusionUpscalePipeline.from_pretrained(
__A ,torch_dtype=torch.floataa ,)
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__ : Dict = """a cat sitting on a park bench"""
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : Union[str, Any] = pipe(
prompt=__A ,image=__A ,generator=__A ,num_inference_steps=5 ,output_type='''np''' ,)
lowercase__ : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
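        # Editorial note: enable_attention_slicing(1) computes attention one
        # slice at a time and enable_sequential_cpu_offload() streams submodules
        # to the GPU on demand; together they trade speed for the < 2.9 GB peak
        # allocation asserted above.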
| 351
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working, simple example of using Accelerate,
# specifically showcasing how to ensure that out-of-memory errors never
# interrupt training, building on the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
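# Editorial sketch (not from the original script): `find_executable_batch_size`
# retries the function it decorates with a halved batch size every time the
# call raises an out-of-memory error, so training can start optimistic and
# degrade gracefully. Shape of a decorated loop, for illustration only:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # OOM here -> retried with 64, then 32, ...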
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[int]:
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : Dict = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels', which is the column name that
    # models in the transformers library expect
lowercase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we pad to a round multiple of 8 (or 16 for fp8)
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : Any = 2
# Initialize accelerator
lowercase__ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : List[Any] = config['''lr''']
lowercase__ : Union[str, Any] = int(config['''num_epochs'''] )
lowercase__ : List[str] = int(config['''seed'''] )
lowercase__ : Any = int(config['''batch_size'''] )
lowercase__ : int = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
    # We can now define an inner training loop function. It should take a batch size as its only parameter,
    # and build the dataloaders inside it.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
        # Instantiate the model (we build the model here so that the seed also controls the initialization of new weights)
lowercase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : str = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : Optional[int] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[str] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : int = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Dict = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Any = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : Union[str, Any] = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
| 302
| 0
|
"""simple docstring"""
from ....utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : List[Any] ,_snake_case : List[Any]=None ,_snake_case : int=2_048 ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Dict = config.__dict__
lowercase__ : Optional[int] = modal_hidden_size
if num_labels:
lowercase__ : str = num_labels
| 352
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowercase__ : List[Any] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ : int = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
lowercase__ : str = tokenizer('''This is me''' ,return_tensors='''pt''' )
lowercase__ : Tuple = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase__ : Optional[int] = model.generate(**_snake_case )
lowercase__ : List[Any] = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
lowercase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase__ : int = model_reloaded.generate(**_snake_case )
self.assertTrue(torch.allclose(_snake_case ,_snake_case ) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[str] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
lowercase__ : Union[str, Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_snake_case ):
model.save_pretrained(_snake_case )
lowercase__ : int = model.reverse_bettertransformer()
model.save_pretrained(_snake_case )
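# Editorial note on the tests above: a model converted with
# to_bettertransformer() must be restored via reverse_bettertransformer()
# before save_pretrained(); saving a still-converted model raises, which is
# exactly what the second test asserts.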
| 302
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( ):
return 1
def __UpperCAmelCase ( __lowerCamelCase ):
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def __UpperCAmelCase ( __lowerCamelCase ):
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(__lowerCAmelCase )
def __UpperCAmelCase ( __lowerCamelCase ):
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(__lowerCAmelCase )
def __UpperCAmelCase ( __lowerCamelCase ):
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(__lowerCAmelCase )
def __UpperCAmelCase ( __lowerCamelCase ):
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(__lowerCAmelCase )
def __UpperCAmelCase ( __lowerCamelCase ):
return 0 if x < 0 else one_pound(x - 1_00 ) + fifty_pence(__lowerCAmelCase )
def __UpperCAmelCase ( __lowerCamelCase ):
return 0 if x < 0 else two_pound(x - 2_00 ) + one_pound(__lowerCAmelCase )
def __UpperCAmelCase ( __lowerCamelCase = 2_00 ):
return two_pound(__lowerCAmelCase )
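# Editorial sketch (not part of the original): the chain of functions above
# counts the combinations of UK coins (1, 2, 5, 10, 20, 50, 100, 200 pence)
# that sum to the target. The same count falls out of the classic bottom-up
# coin-change DP, shown here as an uncalled illustration:
def count_combinations(pence: int = 2_00) -> int:
    ways = [1] + [0] * pence  # one way to make 0 pence
    for coin in (1, 2, 5, 10, 20, 50, 1_00, 2_00):
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]  # count_combinations(2_00) == solution(2_00)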
if __name__ == "__main__":
print(solution(int(input().strip())))
| 353
|
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> Any:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
lowercase__ : List[str] = os.path.abspath(__lowerCamelCase )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
lowercase__ : List[Any] = torch.load(__lowerCamelCase , map_location='''cpu''' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
lowercase__ : int = convert_pytorch_state_dict_to_flax(__lowerCamelCase , __lowerCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowercase__ : Dict = convert_pytorch_sharded_state_dict_to_flax(__lowerCamelCase , __lowerCamelCase )
return flax_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(__lowerCamelCase ) -> bool:
return len(set(__lowerCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowercase__ : int = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowercase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowercase__ : Any = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowercase__ : Tuple = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase__ : str = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
lowercase__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowerCamelCase ):
lowercase__ : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase__ : Optional[int] = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowercase__ : List[str] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowercase__ : List[str] = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowercase__ : List[str] = pt_tuple_key[-2] + '''_v'''
if name is not None:
lowercase__ : Optional[Any] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
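# Illustration of the weight-norm renaming above (editorial, not original code):
#   ('conv', 'parametrizations', 'weight', 'original0') -> ('conv', 'weight_g')
#   ('conv', 'parametrizations', 'weight', 'original1') -> ('conv', 'weight_v')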
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
# convert pytorch tensor to numpy
lowercase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase__ : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowercase__ : str = flax_model.params['''params''']
else:
lowercase__ : Optional[int] = flax_model.params
lowercase__ : Optional[Any] = flatten_dict(__lowerCamelCase )
    # add batch_stats keys/values to the dict
if "batch_stats" in flax_model.params:
lowercase__ : Tuple = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__lowerCamelCase )
lowercase__ : int = {}
lowercase__ : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowercase__ : Union[str, Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ : Optional[Any] = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowercase__ : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase__ , lowercase__ : List[str] = rename_key_and_reshape_tensor(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# add model prefix if necessary
lowercase__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowercase__ : int = jnp.asarray(__lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCamelCase , __lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowercase__ : Tuple = jnp.asarray(__lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowercase__ : Any = jnp.asarray(__lowerCamelCase )
return unflatten_dict(__lowerCamelCase )
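# Editorial summary of the conversion above: a PyTorch Conv2d weight of shape
# (out, in, kH, kW) becomes a Flax kernel of shape (kH, kW, in, out) via
# transpose(2, 3, 1, 0), a Linear weight is simply transposed, and batch-norm
# running statistics are routed into Flax's separate `batch_stats` collection.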
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
import torch
# Load the index
lowercase__ : Dict = {}
for shard_file in shard_filenames:
# load using msgpack utils
lowercase__ : Optional[int] = torch.load(__lowerCamelCase )
lowercase__ : str = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase__ : Dict = flax_model.base_model_prefix
    # use the params dict if the model contains batch norm layers, then add batch_stats keys/values to the dict
if "batch_stats" in flax_model.params:
lowercase__ : Optional[Any] = flax_model.params['''params''']
lowercase__ : List[Any] = flatten_dict(__lowerCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
lowercase__ : Union[str, Any] = flax_model.params
lowercase__ : Tuple = flatten_dict(__lowerCamelCase )
lowercase__ : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowercase__ : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ : List[str] = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowercase__ : Tuple = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase__ , lowercase__ : str = rename_key_and_reshape_tensor(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# add model prefix if necessary
lowercase__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Dict = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowercase__ : Union[str, Any] = jnp.asarray(__lowerCamelCase )
continue
if "var" in flax_key[-1]:
lowercase__ : str = jnp.asarray(__lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCamelCase , __lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowercase__ : List[str] = jnp.asarray(__lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowercase__ : Union[str, Any] = jnp.asarray(__lowerCamelCase )
return unflatten_dict(__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : List[str] = os.path.abspath(__lowerCamelCase )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
lowercase__ : Optional[int] = getattr(__lowerCamelCase , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__lowerCamelCase , '''rb''' ) as state_f:
try:
lowercase__ : str = from_bytes(__lowerCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
lowercase__ : Any = flatten_dict(jax.tree_util.tree_map(lambda __lowerCamelCase : x.dtype == jnp.bfloataa , __lowerCamelCase ) ).values()
if any(__lowerCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
lowercase__ : Union[str, Any] = jax.tree_util.tree_map(
lambda __lowerCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __lowerCamelCase )
lowercase__ : Tuple = flatten_dict(__lowerCamelCase )
lowercase__ : List[str] = pt_model.state_dict()
lowercase__ : int = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
lowercase__ : int = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowercase__ : List[str] = []
lowercase__ : Tuple = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowercase__ : List[Any] = flax_key_tuple[0] == pt_model.base_model_prefix
lowercase__ : Optional[int] = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__ : Tuple = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__ : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCamelCase ) not in pt_model_dict:
# conv layer
lowercase__ : Dict = flax_key_tuple[:-1] + ('''weight''',)
lowercase__ : List[str] = jnp.transpose(__lowerCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ) not in pt_model_dict:
# linear layer
lowercase__ : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
lowercase__ : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowercase__ : Dict = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowercase__ : Any = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
lowercase__ : Dict = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
lowercase__ : Union[str, Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowercase__ : Dict = '''.'''.join(__lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowercase__ : Optional[int] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowercase__ : str = key.split('''.''' )
lowercase__ : Optional[Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowercase__ : List[str] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowercase__ : str = key_components[-2] + '''_v'''
if name is not None:
lowercase__ : Optional[int] = key_components[:-3] + [name]
lowercase__ : List[str] = '''.'''.join(__lowerCamelCase )
lowercase__ : List[Any] = key
if flax_key in special_pt_names:
lowercase__ : Any = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
lowercase__ : List[str] = np.asarray(__lowerCamelCase ) if not isinstance(__lowerCamelCase , np.ndarray ) else flax_tensor
lowercase__ : List[str] = torch.from_numpy(__lowerCamelCase )
# remove from missing keys
missing_keys.remove(__lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowerCamelCase )
pt_model.load_state_dict(__lowerCamelCase )
# re-transform missing_keys to list
lowercase__ : Optional[Any] = list(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(__lowerCamelCase ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
| 302
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __A ( a__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 4_2
class __A ( a__ ,a__ ):
'''simple docstring'''
@register_to_config
def __init__( self : Any ,_snake_case : int = 3 ,_snake_case : int = 3 ,_snake_case : Tuple[str] = ("DownEncoderBlock2D",) ,_snake_case : Tuple[str] = ("UpDecoderBlock2D",) ,_snake_case : Tuple[int] = (64,) ,_snake_case : int = 1 ,_snake_case : str = "silu" ,_snake_case : int = 3 ,_snake_case : int = 32 ,_snake_case : int = 256 ,_snake_case : int = 32 ,_snake_case : Optional[int] = None ,_snake_case : float = 0.1_8215 ,_snake_case : str = "group" ,) -> int:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
lowercase__ : Optional[Any] = Encoder(
in_channels=_lowerCamelCase ,out_channels=_lowerCamelCase ,down_block_types=_lowerCamelCase ,block_out_channels=_lowerCamelCase ,layers_per_block=_lowerCamelCase ,act_fn=_lowerCamelCase ,norm_num_groups=_lowerCamelCase ,double_z=_lowerCamelCase ,)
lowercase__ : int = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowercase__ : Tuple = nn.Convad(_lowerCamelCase ,_lowerCamelCase ,1 )
lowercase__ : Dict = VectorQuantizer(_lowerCamelCase ,_lowerCamelCase ,beta=0.25 ,remap=_lowerCamelCase ,sane_index_shape=_lowerCamelCase )
lowercase__ : str = nn.Convad(_lowerCamelCase ,_lowerCamelCase ,1 )
# pass init params to Decoder
lowercase__ : List[Any] = Decoder(
in_channels=_lowerCamelCase ,out_channels=_lowerCamelCase ,up_block_types=_lowerCamelCase ,block_out_channels=_lowerCamelCase ,layers_per_block=_lowerCamelCase ,act_fn=_lowerCamelCase ,norm_num_groups=_lowerCamelCase ,norm_type=_lowerCamelCase ,)
@apply_forward_hook
def UpperCAmelCase ( self : List[Any] ,_snake_case : torch.FloatTensor ,_snake_case : bool = True ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Any = self.encoder(_lowerCamelCase )
lowercase__ : str = self.quant_conv(_lowerCamelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_lowerCamelCase )
@apply_forward_hook
def UpperCAmelCase ( self : int ,_snake_case : torch.FloatTensor ,_snake_case : bool = False ,_snake_case : bool = True ) -> List[Any]:
"""simple docstring"""
if not force_not_quantize:
lowercase__ : Union[str, Any] = self.quantize(_lowerCamelCase )
else:
lowercase__ : Tuple = h
lowercase__ : int = self.post_quant_conv(_lowerCamelCase )
lowercase__ : List[Any] = self.decoder(_lowerCamelCase ,quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCamelCase )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : torch.FloatTensor ,_snake_case : bool = True ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = sample
lowercase__ : Dict = self.encode(_lowerCamelCase ).latents
lowercase__ : Optional[Any] = self.decode(_lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCamelCase )
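# Round-trip sketch (editorial; the concrete class name is obfuscated above,
# in diffusers this model is published as VQModel):
#   latents = model.encode(images).latents      # Encoder -> quant_conv
#   recon = model.decode(latents).sample        # quantize -> post_quant_conv -> Decoder
# Passing force_not_quantize=True to decode() skips the codebook lookup.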
| 354
|
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Any ,_snake_case : UNetaDModel ,_snake_case : UNetaDModel ,_snake_case : DDPMScheduler ,_snake_case : Any ,) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[int] = value_function
lowercase__ : Optional[int] = unet
lowercase__ : Tuple = scheduler
lowercase__ : Dict = env
lowercase__ : int = env.get_dataset()
lowercase__ : Dict = {}
for key in self.data.keys():
try:
lowercase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ : List[Any] = {}
for key in self.data.keys():
try:
lowercase__ : str = self.data[key].std()
except: # noqa: E722
pass
lowercase__ : Tuple = env.observation_space.shape[0]
lowercase__ : Optional[int] = env.action_space.shape[0]
def UpperCAmelCase ( self : str ,_snake_case : Any ,_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def UpperCAmelCase ( self : Dict ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
if type(_snake_case ) is dict:
return {k: self.to_torch(_snake_case ) for k, v in x_in.items()}
elif torch.is_tensor(_snake_case ):
return x_in.to(self.unet.device )
return torch.tensor(_snake_case ,device=self.unet.device )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Any ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
for key, val in cond.items():
lowercase__ : List[Any] = val.clone()
return x_in
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ,_snake_case : List[Any] ,_snake_case : int ,_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Any = x.shape[0]
lowercase__ : Dict = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ : Dict = torch.full((batch_size,) ,_snake_case ,device=self.unet.device ,dtype=torch.long )
for _ in range(_snake_case ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ : int = self.value_function(x.permute(0 ,2 ,1 ) ,_snake_case ).sample
lowercase__ : Optional[Any] = torch.autograd.grad([y.sum()] ,[x] )[0]
lowercase__ : List[str] = self.scheduler._get_variance(_snake_case )
lowercase__ : Union[str, Any] = torch.exp(0.5 * posterior_variance )
lowercase__ : Optional[int] = model_std * grad
lowercase__ : Optional[Any] = 0
lowercase__ : str = x.detach()
lowercase__ : Dict = x + scale * grad
lowercase__ : str = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : Union[str, Any] = self.unet(x.permute(0 ,2 ,1 ) ,_snake_case ).sample.permute(0 ,2 ,1 )
# TODO: verify deprecation of this kwarg
lowercase__ : Dict = self.scheduler.step(_snake_case ,_snake_case ,_snake_case ,predict_epsilon=_snake_case )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
lowercase__ : Dict = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : Union[str, Any] = self.to_torch(_snake_case )
return x, y
def __call__( self : Union[str, Any] ,_snake_case : Any ,_snake_case : Tuple=64 ,_snake_case : Any=32 ,_snake_case : Optional[Any]=2 ,_snake_case : str=0.1 ) -> List[Any]:
"""simple docstring"""
lowercase__ : Any = self.normalize(_snake_case ,'''observations''' )
lowercase__ : Tuple = obs[None].repeat(_snake_case ,axis=0 )
lowercase__ : Dict = {0: self.to_torch(_snake_case )}
lowercase__ : int = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ : Optional[int] = randn_tensor(_snake_case ,device=self.unet.device )
lowercase__ : Tuple = self.reset_xa(_snake_case ,_snake_case ,self.action_dim )
lowercase__ : str = self.to_torch(_snake_case )
# run the diffusion process
lowercase__ , lowercase__ : int = self.run_diffusion(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# sort output trajectories by value
lowercase__ : Optional[Any] = y.argsort(0 ,descending=_snake_case ).squeeze()
lowercase__ : str = x[sorted_idx]
lowercase__ : str = sorted_values[:, :, : self.action_dim]
lowercase__ : Optional[int] = actions.detach().cpu().numpy()
lowercase__ : List[str] = self.de_normalize(_snake_case ,key='''actions''' )
# select the action with the highest value
if y is not None:
lowercase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ : str = np.random.randint(0 ,_snake_case )
lowercase__ : int = denorm_actions[selected_index, 0]
return denorm_actions
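# Editorial summary of the guidance loop above: each diffusion step scales the
# value function's gradient d(value)/d(trajectory) by torch.exp(0.5 * posterior_variance)
# as a std-like factor, zeroes it where t == 0, nudges the trajectory via
# x <- x + scale * grad, and re-pins the first state to the observed one
# before the UNet denoising step.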
| 302
| 0
|
from __future__ import annotations
import math
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
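    # Why stride 6 suffices (editorial note): every integer is congruent to one
    # of 0..5 mod 6; residues 0, 2, 3, and 4 are divisible by 2 or 3, which the
    # branch above already rules out, so only 6k - 1 and 6k + 1 candidates up
    # to sqrt(number) need trial division.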
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
lowerCAmelCase_ = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple:
if not isinstance(__a , __a ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
lowercase__ : Optional[int] = []
for num in range(len(__a ) ):
lowercase__ : List[str] = 0
while 2 * i * i <= odd_composites[num]:
lowercase__ : Optional[Any] = odd_composites[num] - 2 * i * i
if is_prime(__a ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__a ) == n:
return list_nums
return []
def __UpperCAmelCase ( ) -> str:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 355
|
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
lowerCAmelCase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
lowerCAmelCase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/krishnap25/mauve''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/krishnap25/mauve'''] ,reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[Any] ,_snake_case : Any ,_snake_case : List[str]=None ,_snake_case : Tuple=None ,_snake_case : List[Any]=None ,_snake_case : Any=None ,_snake_case : Optional[int]="auto" ,_snake_case : Optional[int]=-1 ,_snake_case : Optional[int]=0.9 ,_snake_case : Any=5 ,_snake_case : Dict=500 ,_snake_case : Optional[int]="gpt2-large" ,_snake_case : Optional[Any]=-1 ,_snake_case : Tuple=1_024 ,_snake_case : Optional[int]=25 ,_snake_case : Dict=5 ,_snake_case : int=True ,_snake_case : Union[str, Any]=25 ,) -> Any:
"""simple docstring"""
lowercase__ : Any = compute_mauve(
p_text=_snake_case ,q_text=_snake_case ,p_features=_snake_case ,q_features=_snake_case ,p_tokens=_snake_case ,q_tokens=_snake_case ,num_buckets=_snake_case ,pca_max_data=_snake_case ,kmeans_explained_var=_snake_case ,kmeans_num_redo=_snake_case ,kmeans_max_iter=_snake_case ,featurize_model_name=_snake_case ,device_id=_snake_case ,max_text_length=_snake_case ,divergence_curve_discretization_size=_snake_case ,mauve_scaling_factor=_snake_case ,verbose=_snake_case ,seed=_snake_case ,)
return out
| 302
| 0
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __UpperCAmelCase ( ) -> Optional[int]:
lowercase__ : Dict = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__snake_case , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=__snake_case , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__snake_case )
return parser.parse_args()
def __UpperCAmelCase ( ) -> Optional[Any]:
lowercase__ : List[Any] = parse_args()
# Import training_script as a module.
lowercase__ : Optional[int] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase__ : Dict = script_fpath.stem
lowercase__ : Optional[Any] = importlib.import_module(__snake_case )
# Patch sys.argv
lowercase__ : Optional[int] = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
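# Typical invocation (editorial; the script and argument names are only
# illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# Everything after the training-script path is forwarded to it verbatim, plus
# an injected --tpu_num_cores, and xmp.spawn runs the script's _mp_fn once per core.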
| 356
|
"""simple docstring"""
import math
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
lowercase__ : Tuple = 0
lowercase__ : Tuple = 0
while num > 0:
lowercase__ : int = num % 8
lowercase__ : Tuple = octal + (remainder * math.floor(math.pow(10 , __lowerCamelCase ) ))
counter += 1
        lowercase__ : Optional[Any] = math.floor(num / 8 )  # effectively num //= 8
    # Casting to int below drops the trailing '.0' left by the float arithmetic.
return f"""0o{int(__lowerCamelCase )}"""
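# Cross-check (editorial): Python's builtin produces the same strings for
# non-negative inputs, e.g. oct(65) == '0o101', so comparing against oct(num)
# is a cheap sanity test for decimal_to_octal.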
def __UpperCAmelCase ( ) -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
| 302
| 0
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # convert all inputs to numpy arrays; they are matched to the graph inputs by name
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session, defaulting to the CPU execution provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
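
# A minimal usage sketch for `OnnxRuntimeModel` above. The repo id and the
# input name are hypothetical placeholders; any Hub repo containing a
# `model.onnx` file (and network access) is assumed.
def _example_onnx_runtime_model():
    model = OnnxRuntimeModel.from_pretrained("some-user/some-onnx-model")  # hypothetical repo id
    outputs = model(input_ids=np.ones((1, 16), dtype=np.int64))  # kwargs become named graph inputs
    print([output.shape for output in outputs])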
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A convolutional block that bundles conv/batch-norm/activation layers."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
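
# A quick shape sanity check for the conv block above (illustrative only, not
# part of the original module): with kernel_size=3 and padding=1 the spatial
# resolution is preserved.
def _example_conv_module():
    module = UperNetConvModule(in_channels=3, out_channels=8, kernel_size=3, padding=1)
    output = module(torch.randn(1, 3, 32, 32))
    assert output.shape == (1, 8, 32, 32)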
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM): pools the input at several scales,
    projects each pooled map, and upsamples back to the input size."""

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
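
# Illustrative check of the pyramid pooling module above (not part of the
# original file): each block pools the input down to pool_scale x pool_scale,
# projects it to `channels`, and upsamples back, so every output shares the
# input's spatial size. `.eval()` is needed because batch norm cannot compute
# statistics over a 1x1 map in training mode.
def _example_pyramid_pooling():
    ppm = UperNetPyramidPoolingModule(pool_scales=(1, 2, 3, 6), in_channels=16, channels=4, align_corners=False)
    ppm.eval()
    outs = ppm(torch.randn(1, 16, 32, 32))
    assert len(outs) == 4 and all(out.shape == (1, 4, 32, 32) for out in outs)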
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
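
# Illustrative sketch of driving the decode head above with dummy backbone
# features (not part of the original file). The namespace config is a
# stand-in for a real `UperNetConfig`; only the attributes the head actually
# reads are provided.
def _example_decode_head():
    from types import SimpleNamespace

    config = SimpleNamespace(pool_scales=(1, 2, 3, 6), hidden_size=32, num_labels=19, initializer_range=0.02)
    head = UperNetHead(config, in_channels=[16, 32, 64, 128])
    head.eval()
    # four feature maps at increasing depth, as a hierarchical backbone would produce
    features = [torch.randn(1, c, s, s) for c, s in zip([16, 32, 64, 128], [56, 28, 14, 7])]
    logits = head(features)
    assert logits.shape == (1, 19, 56, 56)  # logits at the resolution of the first feature map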
class UperNetFCNHead(nn.Module):
    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature map
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
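
# Same idea for the auxiliary FCN head above, again with a stand-in config
# (illustrative only): it reads just the feature map at `in_index` (2 by default).
def _example_fcn_head():
    from types import SimpleNamespace

    config = SimpleNamespace(
        auxiliary_in_channels=64,
        auxiliary_channels=32,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        num_labels=19,
        initializer_range=0.02,
    )
    head = UperNetFCNHead(config)
    head.eval()
    features = [torch.randn(1, c, s, s) for c, s in zip([16, 32, 64, 128], [56, 28, 14, 7])]
    logits = head(features)
    assert logits.shape == (1, 19, 14, 14)  # spatial size of features[2], with num_labels channels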
class UperNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
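
# End-to-end usage sketch for the model above with the checkpoint listed in
# UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST, mirroring the transformers
# documentation; network access to the Hugging Face Hub is assumed.
def _example_semantic_segmentation():
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.logits.shape)  # (batch_size, num_labels, height, width)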