| code (string, 87-55.2k chars) | code_codestyle (int64, 0-349) | style_context (string, 135-49.1k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
from typing import List, Optional, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
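A minimal usage sketch for the pipeline above; the checkpoint name is an assumption (a public DanceDiffusion checkpoint), not part of the code itself:

# Hedged usage sketch; "harmonai/maestro-150k" is an assumed checkpoint name.
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
waveform = output.audios[0]  # numpy array of shape (channels, samples)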
| 302
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
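A brief sketch of how a config like this is typically used; values are purely illustrative:

# Hedged sketch: instantiate with defaults, then override a field.
config = MraConfig()
config_long = MraConfig(max_position_embeddings=1024)
print(config.hidden_size, config_long.max_position_embeddings)  # 768 1024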
| 303
| 0
|
import os
import sys

SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
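These functions are torch.hub entry points; a hedged usage sketch follows (the hub repo string is an assumption based on the historical `huggingface/pytorch-transformers` hub repo):

# Hypothetical torch.hub usage; the repo string is an assumption.
import torch

tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
mdl = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")
print(mdl(**tok("hello world", return_tensors="pt")).last_hidden_state.shape)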
| 42
|
from typing import List, Optional, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
| 303
| 0
|
from typing import Tuple, Union

from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig


if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function to ensure compatibility of the class in the library.
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
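A hedged sketch of constructing the backbone from a config; the timm backbone name is an assumption and depends on the installed timm version:

# Hedged sketch; assumes "resnet50" is available in the installed timm.
import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet50", use_pretrained_backbone=False, out_indices=(2, 3))
backbone = TimmBackbone(config)
feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
print([f.shape for f in feature_maps])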
| 309
|
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
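A hedged sketch tying the loaders together; it assumes the default checkpoint and config paths hard-coded above actually exist locally:

# Hedged sketch; requires ./model_checkpoints/vqgan_only.{yaml,pt} on disk.
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vqgan = load_vqgan(device)                      # uses the ./model_checkpoints defaults
x = torch.randn(1, 3, 256, 256, device=device)  # dummy image batch
xrec = reconstruct_with_vqgan(x, vqgan)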
| 303
| 0
|
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
| 63
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 303
| 0
|
def get_set_bits_count(number: int) -> int:
    """Count set bits with Brian Kernighan's trick: number &= number - 1 clears the lowest set bit."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the loop won't run 32
        # times, it will only run as many times as there are `1` bits.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
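For example, 25 is 0b11001 (three set bits), so the loop above runs exactly three times:

assert get_set_bits_count(25) == 3  # 0b11001
assert get_set_bits_count(36) == 2  # 0b100100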
| 149
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 303
| 0
|
def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 140
|
from typing import Dict, List, Optional, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.strip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
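A hedged usage sketch for the class-conditional pipeline; the checkpoint name and label follow the public DiT release and are taken here as assumptions:

# Hypothetical usage; "facebook/DiT-XL-2-256" is an assumed checkpoint name.
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
class_ids = pipe.get_label_ids(["golden retriever"])  # label string -> ImageNet id
image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]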
| 303
| 0
|
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GitConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
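A short sketch of composing the two configs (values are purely illustrative):

# Hedged sketch: nest a custom vision config inside the text config.
vision_cfg = GitVisionConfig(image_size=384)
config = GitConfig(vision_config=vision_cfg.to_dict(), num_hidden_layers=6)
print(config.vision_config.image_size)  # 384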
| 272
|
import os
import sys

SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 303
| 0
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 240
|
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    """Apply the ReLU activation element-wise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 303
| 0
|
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCamelCase :
'''simple docstring'''
def _a (self , _lowerCamelCase ):
"""simple docstring"""
raise NotImplementedError()
def _a (self ):
"""simple docstring"""
raise NotImplementedError()
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase = False , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = tokenizer
UpperCAmelCase__ : Any = skip_prompt
UpperCAmelCase__ : Union[str, Any] = decode_kwargs
# variables used in the streaming process
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Dict = 0
UpperCAmelCase__ : str = True
def _a (self , _lowerCamelCase ):
"""simple docstring"""
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""" )
elif len(value.shape ) > 1:
UpperCAmelCase__ : Any = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase__ : str = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase__ : int = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n""" ):
UpperCAmelCase__ : Dict = text[self.print_len :]
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : List[str] = 0
# If the last token is a CJK character, we print the characters.
elif len(_A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase__ : Optional[int] = text[self.print_len :]
self.print_len += len(_A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase__ : int = text[self.print_len : text.rfind(""" """ ) + 1]
self.print_len += len(_A )
self.on_finalized_text(_A )
def _a (self ):
"""simple docstring"""
if len(self.token_cache ) > 0:
UpperCAmelCase__ : Optional[int] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
UpperCAmelCase__ : List[str] = text[self.print_len :]
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Any = 0
else:
UpperCAmelCase__ : Tuple = ''''''
UpperCAmelCase__ : str = True
self.on_finalized_text(_A , stream_end=_A )
def _a (self , _lowerCamelCase , _lowerCamelCase = False ):
"""simple docstring"""
print(_A , flush=_A , end="""""" if not stream_end else None )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = None , **_lowerCamelCase ):
"""simple docstring"""
super().__init__(_A , _A , **_A )
UpperCAmelCase__ : Union[str, Any] = Queue()
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Optional[Any] = timeout
def _a (self , _lowerCamelCase , _lowerCamelCase = False ):
"""simple docstring"""
self.text_queue.put(_A , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__(self ):
"""simple docstring"""
return self
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
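A hedged sketch of streaming generation with the iterator variant; "gpt2" stands in for any causal LM and is an assumption:

# Hypothetical usage; generation runs in a background thread while we consume text.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok("The meaning of life is", return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True)
Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20}).start()
for chunk in streamer:  # yields decoded text pieces as they are generated
    print(chunk, end="")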
| 171
|
def solution(n: int = 1000000) -> int:
    """Return the starting number below n that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for start in range(2, n):
        counter = 0
        number = start
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
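For example, starting at 13 the chain is 13, 40, 20, 10, 5, 16, 8, 4, 2, 1 (ten terms); below one million the longest chain starts at 837799, the well-known Project Euler 14 answer. A slow but direct check:

assert solution(1000000) == 837799  # known Project Euler 14 answer; takes a few seconds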
| 303
| 0
|
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 225
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowercase_ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( snake_case ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : int = False
elif args.student_type == "gpt2":
__SCREAMING_SNAKE_CASE : Optional[int] = False
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : Dict = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the distillation softmax.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction."
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches."
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4_000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether"
                    " to overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)
        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")
        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")
    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
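# A minimal sketch of the temperature-scaled distillation term configured by
# --alpha_ce and --temperature above (standalone illustration; the real loss
# lives in the Distiller class imported at the top):
import torch.nn.functional as F
def distillation_loss(student_logits, teacher_logits, temperature=2.0):
    # Soften both distributions, then scale by T^2 so gradient magnitudes stay
    # comparable across temperatures (Hinton et al., 2015).
    log_p_student = F.log_softmax(student_logits / temperature, dim=-1)
    p_teacher = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction="batchmean") * temperature**2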
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__A : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase):
_UpperCamelCase:Any = BartphoTokenizer
_UpperCamelCase:List[str] = False
_UpperCamelCase:Tuple = True
def _snake_case ( self )-> Any:
super().setUp()
lowerCamelCase_ =['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
lowerCamelCase_ =dict(zip(_A , range(len(_A ) ) ) )
lowerCamelCase_ ={'''unk_token''': '''<unk>'''}
lowerCamelCase_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f'{token} {vocab_tokens[token]}\n' )
lowerCamelCase_ =BartphoTokenizer(_A , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )-> Tuple:
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **_A )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Optional[int]:
lowerCamelCase_ ='''This is a là test'''
lowerCamelCase_ ='''This is a<unk><unk> test'''
return input_text, output_text
def _snake_case ( self )-> str:
lowerCamelCase_ =BartphoTokenizer(_A , self.monolingual_vocab_file , **self.special_tokens_map )
lowerCamelCase_ ='''This is a là test'''
lowerCamelCase_ ='''▁This ▁is ▁a ▁l à ▁t est'''.split()
lowerCamelCase_ =tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
lowerCamelCase_ =tokens + [tokenizer.unk_token]
lowerCamelCase_ =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A )
import math
import os
import sys
def read_file_binary(file_path):
    """Read a file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon, curr_string, index, last_match_id):
    """Add new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        # code length grows by one bit: pad every existing code with a leading 0
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits):
    """Compress data_bits with Lempel-Ziv-Welch and return the result string."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path, compressed):
    """Prefix the compressed bits with the (length-encoded) source file size."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path, to_write):
    """Write a bit string to file_path, padding the final byte with a 1 marker."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path, destination_path):
    """Read, compress, and write the file at source_path to destination_path."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
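# Usage sketch (file paths are placeholders):
#
#   python lempel_ziv_compress.py raw_input_file compressed_output_file
#
# compress_data itself maps a bit string to another bit string; savings only
# appear once the input is long enough for dictionary entries to pay off, and
# very short inputs can even grow.
assert set(compress_data("0101010101")) <= {"0", "1"}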
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
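# The helpers above follow the common "target + params" config pattern: a node
# names a dotted import path plus constructor kwargs. A standalone sketch of
# the same mechanism (the example target below is arbitrary):
def _build_from_target_sketch(config):
    module_path, cls_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_path), cls_name)
    return cls(**config.get("params", {}))
assert type(_build_from_target_sketch({"target": "collections.Counter"})).__name__ == "Counter"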
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = KandinskyVaaPriorPipeline
lowerCAmelCase_ = ['''prompt''']
lowerCAmelCase_ = ['''prompt''', '''negative_prompt''']
lowerCAmelCase_ = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase_ = False
@property
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
return 100
@property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
__SCREAMING_SNAKE_CASE : Optional[Any] = PriorTransformer(**_A )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection(_A )
return model
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = CLIPImageProcessor(
crop_size=224 , do_center_crop=_A , do_normalize=_A , do_resize=_A , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_prior
__SCREAMING_SNAKE_CASE : str = self.dummy_image_encoder
__SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_tokenizer
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_image_processor
__SCREAMING_SNAKE_CASE : str = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=10.0 , )
__SCREAMING_SNAKE_CASE : int = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def UpperCAmelCase__ ( self : Union[str, Any] , _A : int , _A : Dict=0 ):
"""simple docstring"""
if str(_A ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(_A )
else:
__SCREAMING_SNAKE_CASE : str = torch.Generator(device=_A ).manual_seed(_A )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = '''cpu'''
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Any = self.pipeline_class(**_A )
__SCREAMING_SNAKE_CASE : List[Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE : int = pipe(**self.get_dummy_inputs(_A ) )
__SCREAMING_SNAKE_CASE : Tuple = output.image_embeds
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__SCREAMING_SNAKE_CASE : Tuple = image[0, -10:]
__SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__SCREAMING_SNAKE_CASE : List[str] = np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = torch_device == '''cpu'''
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : int = False
self._test_inference_batch_single_identical(
test_max_difference=_A , relax_max_difference=_A , test_mean_pixel_difference=_A , )
@skip_mps
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = torch_device == '''cpu'''
__SCREAMING_SNAKE_CASE : List[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_A , test_mean_pixel_difference=_A , )
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
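# Layout note: unlike BERT's "[CLS] A [SEP] B [SEP]", the methods above place
# XLNet's special tokens at the end ("A <sep> B <sep> <cls>"). A standalone
# illustration with made-up ids (4 = sep, 3 = cls are placeholders, not the
# real vocabulary ids):
def _xlnet_layout_sketch(ids_a, ids_b=None, sep_id=4, cls_id=3):
    if ids_b is None:
        return ids_a + [sep_id, cls_id]
    return ids_a + [sep_id] + ids_b + [sep_id, cls_id]
assert _xlnet_layout_sketch([10, 11], [20]) == [10, 11, 4, 20, 4, 3]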
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()
    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()
    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")
    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
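# The dtype switch above halves the dump size whenever the vocabulary fits in
# 16 bits, since every token id is then stored as 2 bytes instead of 4.
# Standalone sketch of the rule:
def _ids_dtype_sketch(vocab_size):
    return np.uint16 if vocab_size < (1 << 16) else np.int32
assert _ids_dtype_sketch(30_522) == np.uint16  # e.g. a BERT-sized vocab
assert _ids_dtype_sketch(100_000) == np.int32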
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_2 , snake_case_=3 , snake_case_=4 , snake_case_=[1_0, 2_0, 3_0, 4_0] , snake_case_=[2, 2, 3, 2] , snake_case_=True , snake_case_=True , snake_case_=3_7 , snake_case_="gelu" , snake_case_=1_0 , snake_case_=0.02 , snake_case_=["stage2", "stage3", "stage4"] , snake_case_=[2, 3, 4] , snake_case_=None , ):
_lowerCAmelCase : Any = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : Dict = image_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : Union[str, Any] = num_stages
_lowerCAmelCase : Tuple = hidden_sizes
_lowerCAmelCase : List[str] = depths
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Dict = num_labels
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Dict = out_features
_lowerCAmelCase : List[Any] = out_indices
_lowerCAmelCase : str = scope
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = ConvNextModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = ConvNextForImageClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : int = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = ConvNextBackbone(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[int] = model(_A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : List[str] = ConvNextBackbone(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : str = model(_A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a_ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__lowerCAmelCase : Optional[Any] = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : Any = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : Optional[int] = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = ConvNextModelTester(self )
_lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self ):
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(_A )
_lowerCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : str = [*signature.parameters.keys()]
_lowerCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_A )
def __UpperCamelCase ( self ):
def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Any = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) )
_lowerCAmelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : str = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A , _A , _A )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __UpperCamelCase ( self ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[str] = ConvNextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_A )
_lowerCAmelCase : int = self.default_image_processor
_lowerCAmelCase : Dict = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A , return_tensors="""pt""" ).to(_A )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**_A )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
_lowerCAmelCase : Optional[int] = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
@require_torch
class a_ (unittest.TestCase , lowerCAmelCase__ ):
__lowerCAmelCase : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
__lowerCAmelCase : str = ConvNextConfig
__lowerCAmelCase : List[Any] = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = ConvNextModelTester(self )
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
lowercase_ = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
lowercase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)
    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
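# The class above round-trips pixel values between the diffusion range [-1, 1]
# and the 8-bit range [0, 255] expected by the DWT-DCT watermarker. A
# standalone check of the two affine maps:
def _to_uint8_range_sketch(x):  # [-1, 1] -> [0, 255]
    return 255 * (x / 2 + 0.5)
def _to_diffusion_range_sketch(x):  # [0, 255] -> [-1, 1]
    return np.clip(2 * (x / 255 - 0.5), -1.0, 1.0)
assert np.allclose(_to_diffusion_range_sketch(_to_uint8_range_sketch(np.array([-1.0, 0.0, 1.0]))), [-1.0, 0.0, 1.0])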
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Shortest path on a binary grid: 1-cells are walkable, each step costs 1."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
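    # Usage sketch (the grid below is an illustrative example, not from the
    # original file): 1-cells are walkable and every step costs 1.
    example_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    example_dist, example_path = dijkstra(example_grid, (0, 0), (2, 0), allow_diagonal=False)
    assert example_dist == 6.0 and example_path[0] == (0, 0) and example_path[-1] == (2, 0)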
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( lowerCAmelCase__):
"""simple docstring"""
UpperCamelCase__ = (IPNDMScheduler,)
UpperCamelCase__ = (("""num_inference_steps""", 50),)
def UpperCAmelCase_ ( self: List[Any] , **__lowerCamelCase: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = {'''num_train_timesteps''': 1000}
config.update(**_A )
return config
def UpperCAmelCase_ ( self: Tuple , __lowerCamelCase: Dict=0 , **__lowerCamelCase: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Any = dict(self.forward_default_kwargs )
UpperCamelCase__: Optional[int] = kwargs.pop("num_inference_steps" , _A )
UpperCamelCase__: List[str] = self.dummy_sample
UpperCamelCase__: str = 0.1 * sample
UpperCamelCase__: Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__: int = self.get_scheduler_config(**_A )
UpperCamelCase__: List[str] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
UpperCamelCase__: Optional[int] = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase__: Optional[int] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
UpperCamelCase__: Tuple = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
UpperCamelCase__: int = dummy_past_residuals[:]
UpperCamelCase__: Union[str, Any] = scheduler.step(_A , _A , _A , **_A ).prev_sample
UpperCamelCase__: List[str] = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase__: Any = scheduler.step(_A , _A , _A , **_A ).prev_sample
UpperCamelCase__: List[Any] = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self: Optional[int] , __lowerCamelCase: Tuple=0 , **__lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = dict(self.forward_default_kwargs )
UpperCamelCase__: str = kwargs.pop("num_inference_steps" , _A )
UpperCamelCase__: Union[str, Any] = self.dummy_sample
UpperCamelCase__: Optional[Any] = 0.1 * sample
UpperCamelCase__: Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__: int = self.get_scheduler_config()
UpperCamelCase__: Tuple = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase__: List[str] = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase__: Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
UpperCamelCase__: int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase__: int = dummy_past_residuals[:]
UpperCamelCase__: Tuple = scheduler.step(_A , _A , _A , **_A ).prev_sample
UpperCamelCase__: int = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase__: Tuple = scheduler.step(_A , _A , _A , **_A ).prev_sample
UpperCamelCase__: Union[str, Any] = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self: Tuple , **__lowerCamelCase: Tuple ):
'''simple docstring'''
UpperCamelCase__: Tuple = self.scheduler_classes[0]
UpperCamelCase__: int = self.get_scheduler_config(**_A )
UpperCamelCase__: str = scheduler_class(**_A )
UpperCamelCase__: Any = 10
UpperCamelCase__: Dict = self.dummy_model()
UpperCamelCase__: Tuple = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__: Optional[int] = model(_A , _A )
UpperCamelCase__: Any = scheduler.step(_A , _A , _A ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__: Tuple = model(_A , _A )
UpperCamelCase__: Dict = scheduler.step(_A , _A , _A ).prev_sample
return sample
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Any = dict(self.forward_default_kwargs )
UpperCamelCase__: Tuple = kwargs.pop("num_inference_steps" , _A )
for scheduler_class in self.scheduler_classes:
UpperCamelCase__: List[str] = self.get_scheduler_config()
UpperCamelCase__: Dict = scheduler_class(**_A )
UpperCamelCase__: Any = self.dummy_sample
UpperCamelCase__: Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A , "set_timesteps" ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A , "set_timesteps" ):
UpperCamelCase__: Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase__: List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCamelCase__: Union[str, Any] = dummy_past_residuals[:]
UpperCamelCase__: int = scheduler.timesteps[5]
UpperCamelCase__: str = scheduler.timesteps[6]
UpperCamelCase__: List[str] = scheduler.step(_A , _A , _A , **_A ).prev_sample
UpperCamelCase__: List[str] = scheduler.step(_A , _A , _A , **_A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCamelCase__: List[Any] = scheduler.step(_A , _A , _A , **_A ).prev_sample
UpperCamelCase__: Any = scheduler.step(_A , _A , _A , **_A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_A , time_step=_A )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_A , time_step=_A )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Any = self.full_loop()
UpperCamelCase__: List[Any] = torch.mean(torch.abs(_A ) )
        assert abs(result_mean.item() - 2_540_529) < 10
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
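# A standalone sketch of the lazy-import idea used above: PEP 562 lets a
# package resolve attributes on first access instead of importing heavy
# submodules eagerly. Illustration only (the real transformers _LazyModule is
# more general); in a plain package __init__.py it would look like:
#
#   import importlib
#
#   _LAZY_ATTRS = {"IBertModel": ".modeling_ibert"}  # attribute -> submodule
#
#   def __getattr__(name):
#       if name in _LAZY_ATTRS:
#           module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")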
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
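    # Worked check: at x = mu the exponent vanishes, so the density equals the
    # normalizing constant 1 / sqrt(2 * pi * sigma**2); for the standard
    # normal (mu = 0, sigma = 1) that is about 0.3989.
    assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12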
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
'''simple docstring'''
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    """DFS-based topological sort; appends each vertex after its descendants."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
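    # For this graph the DFS appends children before parents, so the result is
    # the reverse of a conventional topological order:
    assert sort == ["c", "d", "e", "b", "a"]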
import sys
from collections import defaultdict
class Heap:
    """Indexed min-heap that tracks each vertex's position for decrease-key."""
    def __init__(self):
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Prim's minimum spanning tree over an adjacency list rooted at vertex 0."""
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
lowercase_ = int(input("""Enter number of edges: """).strip())
lowercase_ = defaultdict(list)
for _ in range(edges_number):
lowercase_ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
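# Illustrative check (hypothetical input, not part of the original script): for
# the 3 edges "0 1 5", "1 2 3", "0 2 1", the adjacency list becomes
# {0: [[1, 5], [2, 1]], 1: [[0, 5], [2, 3]], 2: [[1, 3], [0, 1]]} and
# prisms_algorithm returns the spanning-tree edges [(0, 2), (2, 1)].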
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
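# Worked check of rotate (added for clarity): rotate(numpy.array([1, 0]), 90)
# builds the rotation matrix ((0, -1), (1, 0)) and returns approximately [0, 1].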
def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=24 , _lowerCamelCase=2 , _lowerCamelCase=6 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=1000 , ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : Union[str, Any] = seq_length
UpperCAmelCase__ : Tuple = is_training
UpperCAmelCase__ : int = use_input_mask
UpperCAmelCase__ : Optional[int] = use_token_type_ids
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : List[str] = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : Dict = hidden_dropout_prob
UpperCAmelCase__ : Any = attention_probs_dropout_prob
UpperCAmelCase__ : Tuple = max_position_embeddings
UpperCAmelCase__ : Dict = type_vocab_size
UpperCAmelCase__ : List[str] = type_sequence_label_size
UpperCAmelCase__ : str = initializer_range
UpperCAmelCase__ : Optional[Any] = num_labels
UpperCAmelCase__ : Dict = scope
UpperCAmelCase__ : List[Any] = range_bbox
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : str = bbox[i, j, 3]
UpperCAmelCase__ : Dict = bbox[i, j, 1]
UpperCAmelCase__ : Dict = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Dict = bbox[i, j, 2]
UpperCAmelCase__ : Any = bbox[i, j, 0]
UpperCAmelCase__ : Optional[Any] = t
UpperCAmelCase__ : Dict = None
if self.use_input_mask:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : int = None
UpperCAmelCase__ : int = None
if self.use_labels:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Any = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _a (self ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = LiltModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : int = model(_A , bbox=_A , attention_mask=_A , token_type_ids=_A )
UpperCAmelCase__ : Dict = model(_A , bbox=_A , token_type_ids=_A )
UpperCAmelCase__ : List[Any] = model(_A , bbox=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Optional[Any] = LiltForTokenClassification(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(
_A , bbox=_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = LiltForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(
_A , bbox=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
(
UpperCAmelCase__
) : List[str] = config_and_inputs
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
return True
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = LiltModelTester(self )
UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=_A , hidden_size=37 )
def _a (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Optional[int] = type
self.model_tester.create_and_check_model(*_A )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
@slow
def _a (self ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[int] = LiltModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(_A )
UpperCAmelCase__ : int = torch.tensor([[1, 2]] , device=_A )
UpperCAmelCase__ : List[str] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_A )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(input_ids=_A , bbox=_A )
UpperCAmelCase__ : Any = torch.Size([1, 2, 768] )
UpperCAmelCase__ : Dict = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=_A , )
self.assertTrue(outputs.last_hidden_state.shape , _A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _A , atol=1e-3 ) )
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
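# Worked check (added for clarity): with the default scale_factor=8,
# downscale_height_and_width(768, 768) computes 768 // 64 = 12 per side and
# returns (96, 96); for sides that are not multiples of scale_factor**2,
# the quotient is rounded up before multiplying back by scale_factor.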
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowercase_ = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowercase_ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def a__ ( snake_case ):
"""simple docstring"""
if "://" in dataset_path:
__SCREAMING_SNAKE_CASE : Any = dataset_path.split('''://''' )[1]
return dataset_path
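# For example: extract_path_from_uri("s3://my-bucket/dataset/train") returns
# "my-bucket/dataset/train", while a plain local path is returned unchanged.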
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Check whether the filesystem uses a remote protocol."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's cached event loop, thread, and lock references."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
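# For example: get_obj_from_str("collections.OrderedDict") imports the
# `collections` module and returns the `OrderedDict` class object.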
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 0
|
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def lowerCAmelCase_ ( A_ ,A_=0):
UpperCamelCase__: Union[str, Any] = []
for old_item in old_list:
UpperCamelCase__: List[str] = old_item.replace("in_layers.0" ,"norm1")
UpperCamelCase__: Dict = new_item.replace("in_layers.2" ,"conv1")
UpperCamelCase__: Any = new_item.replace("out_layers.0" ,"norm2")
UpperCamelCase__: Union[str, Any] = new_item.replace("out_layers.3" ,"conv2")
UpperCamelCase__: Tuple = new_item.replace("emb_layers.1" ,"time_emb_proj")
UpperCamelCase__: Dict = new_item.replace("skip_connection" ,"conv_shortcut")
UpperCamelCase__: List[Any] = shave_segments(A_ ,n_shave_prefix_segments=A_)
mapping.append({"old": old_item, "new": new_item})
return mapping
def lowerCAmelCase_ ( A_ ,A_=0):
UpperCamelCase__: List[str] = []
for old_item in old_list:
UpperCamelCase__: Dict = old_item
UpperCamelCase__: Dict = new_item.replace("norm.weight" ,"group_norm.weight")
UpperCamelCase__: int = new_item.replace("norm.bias" ,"group_norm.bias")
UpperCamelCase__: Any = new_item.replace("proj_out.weight" ,"proj_attn.weight")
UpperCamelCase__: Union[str, Any] = new_item.replace("proj_out.bias" ,"proj_attn.bias")
UpperCamelCase__: Any = shave_segments(A_ ,n_shave_prefix_segments=A_)
mapping.append({"old": old_item, "new": new_item})
return mapping
def lowerCAmelCase_ ( A_ ,A_ ,A_ ,A_=None ,A_=None ,A_=None):
assert isinstance(A_ ,A_), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCamelCase__: Optional[int] = old_checkpoint[path]
UpperCamelCase__: Tuple = old_tensor.shape[0] // 3
UpperCamelCase__: Optional[int] = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
UpperCamelCase__: Union[str, Any] = old_tensor.shape[0] // config['''num_head_channels'''] // 3
UpperCamelCase__: str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
UpperCamelCase__: Optional[int] = old_tensor.split(channels // num_heads ,dim=1)
UpperCamelCase__: Tuple = query.reshape(A_)
UpperCamelCase__: Optional[Any] = key.reshape(A_)
UpperCamelCase__: Union[str, Any] = value.reshape(A_)
for path in paths:
UpperCamelCase__: List[Any] = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCamelCase__: List[Any] = new_path.replace("middle_block.0" ,"mid_block.resnets.0")
UpperCamelCase__: str = new_path.replace("middle_block.1" ,"mid_block.attentions.0")
UpperCamelCase__: Any = new_path.replace("middle_block.2" ,"mid_block.resnets.1")
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCamelCase__: Optional[int] = new_path.replace(replacement["old"] ,replacement["new"])
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCamelCase__: Tuple = old_checkpoint[path['''old''']][:, :, 0]
else:
UpperCamelCase__: int = old_checkpoint[path['''old''']]
def lowerCAmelCase_ ( A_ ,A_):
UpperCamelCase__: List[str] = {}
UpperCamelCase__: str = checkpoint['''time_embed.0.weight''']
UpperCamelCase__: List[Any] = checkpoint['''time_embed.0.bias''']
UpperCamelCase__: Any = checkpoint['''time_embed.2.weight''']
UpperCamelCase__: Optional[int] = checkpoint['''time_embed.2.bias''']
UpperCamelCase__: str = checkpoint['''input_blocks.0.0.weight''']
UpperCamelCase__: Optional[int] = checkpoint['''input_blocks.0.0.bias''']
UpperCamelCase__: int = checkpoint['''out.0.weight''']
UpperCamelCase__: Optional[int] = checkpoint['''out.0.bias''']
UpperCamelCase__: int = checkpoint['''out.2.weight''']
UpperCamelCase__: int = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
UpperCamelCase__: List[Any] = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
UpperCamelCase__: int = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(A_)
}
# Retrieves the keys for the middle blocks only
UpperCamelCase__: Any = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
UpperCamelCase__: List[str] = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(A_)
}
# Retrieves the keys for the output blocks only
UpperCamelCase__: List[str] = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
UpperCamelCase__: str = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(A_)
}
for i in range(1 ,A_):
UpperCamelCase__: Tuple = (i - 1) // (config['''num_res_blocks'''] + 1)
UpperCamelCase__: Dict = (i - 1) % (config['''num_res_blocks'''] + 1)
UpperCamelCase__: List[Any] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
UpperCamelCase__: Optional[Any] = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
UpperCamelCase__: List[str] = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
UpperCamelCase__: str = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
UpperCamelCase__: Dict = renew_resnet_paths(A_)
UpperCamelCase__: Tuple = {'''old''': F"input_blocks.{i}.0", '''new''': F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
UpperCamelCase__: Optional[Any] = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
A_ ,A_ ,A_ ,additional_replacements=[meta_path, resnet_op] ,config=A_)
if len(A_):
UpperCamelCase__: List[str] = renew_attention_paths(A_)
UpperCamelCase__: Dict = {
'''old''': F"input_blocks.{i}.1",
'''new''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
UpperCamelCase__: Any = {
F"input_blocks.{i}.1.qkv.bias": {
'''key''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'''query''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'''value''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
'''key''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'''query''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'''value''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
A_ ,A_ ,A_ ,additional_replacements=[meta_path] ,attention_paths_to_split=A_ ,config=A_ ,)
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the architecture.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        vqvae = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 149
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
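# The optional backends below are appended to the import structure only when their
# dependencies are importable, so importing this package never hard-fails on a
# missing tokenizers/torch/TF/Flax install.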
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_electra"""] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_electra"""] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_electra"""] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 303
| 0
|
from math import factorial
def solution(n: int = 20) -> int:
    '''simple docstring'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
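# Sanity check worked out by hand: a 2x2 grid has binomial(4, 2) = 6 lattice routes,
# so solution(2) should return 6.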
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 140
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] , _A : TransformeraDModel , _A : AutoencoderKL , _A : KarrasDiffusionSchedulers , _A : Optional[Dict[int, str]] = None , ):
"""simple docstring"""
super().__init__()
self.register_modules(transformer=_A , vae=_A , scheduler=_A )
        # create an imagenet -> id dictionary for easier use
__SCREAMING_SNAKE_CASE : Optional[int] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = int(_A )
__SCREAMING_SNAKE_CASE : List[str] = dict(sorted(self.labels.items() ) )
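        # e.g. a (hypothetical) id2label entry {7: "cock, rooster"} would produce two
        # lookup keys mapping to class id 7, sorted alphabetically for stable access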
def UpperCAmelCase__ ( self : List[Any] , _A : Union[str, List[str]] ):
"""simple docstring"""
if not isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(_A )
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Dict , _A : List[int] , _A : float = 4.0 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : int = 50 , _A : Optional[str] = "pil" , _A : bool = True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = len(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.transformer.config.sample_size
__SCREAMING_SNAKE_CASE : List[Any] = self.transformer.config.in_channels
__SCREAMING_SNAKE_CASE : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
__SCREAMING_SNAKE_CASE : Tuple = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(_A , device=self.device ).reshape(-1 )
__SCREAMING_SNAKE_CASE : Any = torch.tensor([1000] * batch_size , device=self.device )
__SCREAMING_SNAKE_CASE : Any = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE : Optional[Any] = latent_model_input[: len(_A ) // 2]
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat([half, half] , dim=0 )
__SCREAMING_SNAKE_CASE : int = self.scheduler.scale_model_input(_A , _A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = t
if not torch.is_tensor(_A ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__SCREAMING_SNAKE_CASE : Any = latent_model_input.device.type == '''mps'''
if isinstance(_A , _A ):
                    __SCREAMING_SNAKE_CASE : List[Any] = torch.float32 if is_mps else torch.float64
else:
                    __SCREAMING_SNAKE_CASE : int = torch.int32 if is_mps else torch.int64
__SCREAMING_SNAKE_CASE : int = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__SCREAMING_SNAKE_CASE : Optional[int] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.transformer(
_A , timestep=_A , class_labels=_A ).sample
# perform guidance
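            # Classifier-free guidance sketch: the duplicated batch holds the
            # conditional half first and the null-class half second, and the final
            # noise estimate is uncond_eps + guidance_scale * (cond_eps - uncond_eps)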
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = torch.split(_A , len(_A ) // 2 , dim=0 )
__SCREAMING_SNAKE_CASE : str = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat([half_eps, half_eps] , dim=0 )
__SCREAMING_SNAKE_CASE : List[str] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = torch.split(_A , _A , dim=1 )
else:
__SCREAMING_SNAKE_CASE : List[Any] = noise_pred
# compute previous image: x_t -> x_t-1
__SCREAMING_SNAKE_CASE : str = self.scheduler.step(_A , _A , _A ).prev_sample
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = latent_model_input.chunk(2 , dim=0 )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = latent_model_input
__SCREAMING_SNAKE_CASE : List[Any] = 1 / self.vae.config.scaling_factor * latents
__SCREAMING_SNAKE_CASE : List[str] = self.vae.decode(_A ).sample
__SCREAMING_SNAKE_CASE : Any = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE : int = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE : str = self.numpy_to_pil(_A )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_A )
| 303
| 0
|
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
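# The "*" wildcard in MAPPING stands for the encoder layer index: during renaming it
# is substituted with the layer number parsed out of the fairseq key.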
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    '''simple docstring'''
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    '''simple docstring'''
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=True, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            """config_yaml""": config_yaml_path,
            """data""": """/""".join(dict_path.split("""/""")[:-1]),
            """w2v_path""": checkpoint_path,
            """load_pretrained_decoder_from""": None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = '''mbart50'''
    config["feature_extractor_type"] = '''wav2vec2'''
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_0_2_4, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=2_5_0_0_0_4, type=int, help='''`decoder_start_token_id` of model config''')
__lowercase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 272
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """simple docstring"""
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """simple docstring"""
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """simple docstring"""
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """simple docstring"""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """simple docstring"""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """simple docstring"""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """simple docstring"""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
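# Hypothetical torch.hub-style usage of the shortcuts above:
#   model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")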
| 303
| 0
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
snake_case : Optional[Any] = logging.get_logger(__name__)
class snake_case_ (lowerCAmelCase__ ):
def __init__( self :Union[str, Any] ,*__snake_case :Dict ,**__snake_case :List[Any] ) -> List[str]:
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' ,_A ,)
super().__init__(*_A ,**_A )
| 240
|
from __future__ import annotations
import numpy as np
def relu(vector):
    """simple docstring"""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 303
| 0
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_A = open # noqa: we just need to have a builtin inside this module to test it properly
| 171
|
def solution(n: int = 1_000_000) -> int:
    """Returns the start value below n whose Collatz chain is the longest."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
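# The memo dict `counters` maps a start value to its full chain length, so once a
# chain such as 16 -> 8 -> 4 -> 2 -> 1 is known, any later sequence reaching 16 just
# adds the cached length instead of re-walking it.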
if __name__ == "__main__":
print(solution(int(input().strip())))
| 303
| 0
|
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
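# Runs in O(n) time with O(1) extra space, but note that it splits the list and
# leaves the second half reversed.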
def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 225
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
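    # At least one of the loss weights must be positive, otherwise the distillation
    # objective would be identically zero (enforced by the last assert above).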
def freeze_pos_embeddings(student, args):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case , required=snake_case , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case , required=snake_case , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case , required=snake_case , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case , type=snake_case , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case , required=snake_case , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case , default=4_000 , help='''Checkpoint interval.''' )
    args = parser.parse_args()
    sanity_checks(args)
# ARGS #
init_gpu_params(snake_case )
set_seed(snake_case )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to'''
                    ''' overwrite it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case ) , snake_case , indent=4 )
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__SCREAMING_SNAKE_CASE : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__SCREAMING_SNAKE_CASE : Any = tokenizer.all_special_tokens.index(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__SCREAMING_SNAKE_CASE : Any = special_tok_ids
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : List[str] = pickle.load(snake_case )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = np.maximum(snake_case , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__SCREAMING_SNAKE_CASE : Any = 0.0 # do not predict special tokens
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(snake_case )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = LmSeqsDataset(params=snake_case , data=snake_case )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_config_class.from_pretrained(args.student_config )
__SCREAMING_SNAKE_CASE : Dict = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case )
else:
__SCREAMING_SNAKE_CASE : str = student_model_class(snake_case )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__SCREAMING_SNAKE_CASE : List[str] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case , snake_case )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case , snake_case )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__SCREAMING_SNAKE_CASE : int = Distiller(
params=snake_case , dataset=snake_case , token_probs=snake_case , student=snake_case , teacher=snake_case )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 303
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
__A : Any = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Union[str, Any] = "roberta"
def __init__( self , _SCREAMING_SNAKE_CASE=5_0265 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , )-> List[str]:
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_act
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =position_embedding_type
lowerCamelCase_ =use_cache
lowerCamelCase_ =classifier_dropout
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
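    # With the default task, the exported ONNX graph therefore declares input_ids and
    # attention_mask with dynamic batch and sequence axes.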
| 154
|
import math
import os
import sys
def read_file_binary(file_path):
    """simple docstring"""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon, curr_string, index, last_match_id):
    """simple docstring"""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
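# Rationale: once `index` reaches a power of two, every emitted code needs one more
# bit, so all existing lexicon codes are left-padded with "0" to stay aligned.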
def compress_data(data_bits):
    """simple docstring"""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path, compressed):
    """simple docstring"""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path, to_write):
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
except OSError:
print('''File not accessible''' )
sys.exit()
def compress(source_path, destination_path):
    """simple docstring"""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
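# Pipeline sketch: raw bytes -> bit string -> LZ codes -> prefixed with the original
# file length -> packed back into bytes on disk.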
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 303
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = [[1, 2, 4], [1, 2, 3, 4]]
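        # A disjunctive constraint is fulfilled by completing any one branch; here the
        # two branches share the common prefix [1, 2].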
__a = DisjunctiveConstraint(_A )
self.assertTrue(isinstance(dc.token_ids , _A ) )
with self.assertRaises(_A ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_A ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_A ):
DisjunctiveConstraint(_A ) # fails here
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = [[1, 2, 3], [1, 2, 4]]
__a = DisjunctiveConstraint(_A )
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__a = DisjunctiveConstraint(_A )
__a = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__a = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__a = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__a = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__a = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__a = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__a = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 302
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = KandinskyVaaPriorPipeline
lowerCAmelCase_ = ['''prompt''']
lowerCAmelCase_ = ['''prompt''', '''negative_prompt''']
lowerCAmelCase_ = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase_ = False
@property
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
return 100
@property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
__SCREAMING_SNAKE_CASE : Optional[Any] = PriorTransformer(**_A )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection(_A )
return model
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = CLIPImageProcessor(
crop_size=224 , do_center_crop=_A , do_normalize=_A , do_resize=_A , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_prior
__SCREAMING_SNAKE_CASE : str = self.dummy_image_encoder
__SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_tokenizer
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_image_processor
__SCREAMING_SNAKE_CASE : str = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=10.0 , )
__SCREAMING_SNAKE_CASE : int = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def UpperCAmelCase__ ( self : Union[str, Any] , _A : int , _A : Dict=0 ):
"""simple docstring"""
if str(_A ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(_A )
else:
__SCREAMING_SNAKE_CASE : str = torch.Generator(device=_A ).manual_seed(_A )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = '''cpu'''
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Any = self.pipeline_class(**_A )
__SCREAMING_SNAKE_CASE : List[Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE : int = pipe(**self.get_dummy_inputs(_A ) )
__SCREAMING_SNAKE_CASE : Tuple = output.image_embeds
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__SCREAMING_SNAKE_CASE : Tuple = image[0, -10:]
__SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__SCREAMING_SNAKE_CASE : List[str] = np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
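# --- Added usage sketch (not part of the original test file) ---
# A minimal end-to-end call mirroring get_dummy_inputs above, assuming the public
# `kandinsky-community/kandinsky-2-1-prior` checkpoint; values are illustrative only.
#
# from diffusers import KandinskyPriorPipeline
#
# pipe = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
# out = pipe(prompt="horse", guidance_scale=4.0, num_inference_steps=2, output_type="np")
# print(out.image_embeds.shape)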
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not np.nan
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not np.nan
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its top `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first
        # `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto its `dimensions` most discriminant axes."""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded; note that .any() must be called,
    # since the bound method object itself is always truthy
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
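

# --- Added usage sketch (illustrative, not part of the original module) ---
# Projects a tiny 3-feature, 2-class dataset with both reductions; only the output shapes
# are checked, since eigenvector signs are implementation-dependent.
if __name__ == "__main__":
    demo_features = np.array(
        [[1.0, 2.0, 3.0, 6.0, 7.0, 8.0], [2.0, 1.0, 3.0, 7.0, 6.0, 8.0], [1.0, 3.0, 2.0, 8.0, 7.0, 6.0]]
    )
    demo_labels = np.array([0, 0, 0, 1, 1, 1])
    assert principal_component_analysis(demo_features, dimensions=2).shape == (2, 6)
    assert linear_discriminant_analysis(demo_features, demo_labels, classes=2, dimensions=1).shape == (1, 6)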
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowercase_ = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
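

# --- Added usage sketch (illustrative, not part of the original script) ---
# Typical invocation, followed by reading the resulting pickle back; the paths are the
# script's defaults, not required names.
#
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text
#
# with open("data/binarized_text.bert-base-uncased.pickle", "rb") as handle:
#     sequences = pickle.load(handle)  # list of np.uint16 token-id arrays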
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an ImageNet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)

            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
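

# --- Added usage sketch (illustrative, not part of the original file) ---
# Minimal class-conditional generation, assuming the public `facebook/DiT-XL-2-256`
# checkpoint; get_label_ids is the helper defined above.
#
# import torch
# from diffusers import DiTPipeline
#
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
# class_ids = pipe.get_label_ids(["white shark", "golden retriever"])
# images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images
# images[0].save("dit_sample.png")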
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
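

# --- Added usage sketch (illustrative, not part of the original file) ---
# Watermarks a dummy batch of images in [-1, 1]; inputs narrower than 256 px are
# returned unchanged by apply_watermark. Requires the `invisible-watermark` package.
if __name__ == "__main__":
    dummy = torch.rand(1, 3, 512, 512) * 2 - 1  # NCHW batch in [-1, 1]
    marked = StableDiffusionXLWatermarker().apply_watermark(dummy)
    print(marked.shape)  # torch.Size([1, 3, 512, 512])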
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Return the shortest distance and path between two cells of a binary grid
    (1 = passable, 0 = blocked), moving in 4 or, optionally, 8 directions."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
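

# --- Added usage sketch (illustrative, not part of the original module) ---
# Shortest path across a small binary grid (1 = walkable, 0 = blocked).
if __name__ == "__main__":
    demo_grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
    distance, path = dijkstra(demo_grid, source=(0, 0), destination=(2, 2), allow_diagonal=False)
    print(distance, path)  # 4.0 [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]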
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
"""simple docstring"""
def __init__( self: int , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict=13 , __lowerCamelCase: str=7 , __lowerCamelCase: Tuple=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=True , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: Optional[int]=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: List[str]=5 , __lowerCamelCase: List[Any]=4 , __lowerCamelCase: Dict=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Any=0.1 , __lowerCamelCase: int=128 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: Any=16 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: List[Any]=3 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: Union[str, Any]=None , ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = parent
UpperCamelCase__: Union[str, Any] = batch_size
UpperCamelCase__: int = seq_length
UpperCamelCase__: Any = is_training
UpperCamelCase__: Any = use_input_mask
UpperCamelCase__: Optional[Any] = use_token_type_ids
UpperCamelCase__: Tuple = use_labels
UpperCamelCase__: Union[str, Any] = vocab_size
UpperCamelCase__: Optional[Any] = hidden_size
UpperCamelCase__: Optional[Any] = num_hidden_layers
UpperCamelCase__: Dict = num_attention_heads
UpperCamelCase__: List[str] = intermediate_size
UpperCamelCase__: Optional[int] = hidden_act
UpperCamelCase__: List[Any] = hidden_dropout_prob
UpperCamelCase__: Tuple = attention_probs_dropout_prob
UpperCamelCase__: Tuple = max_position_embeddings
UpperCamelCase__: str = type_vocab_size
UpperCamelCase__: List[Any] = type_sequence_label_size
UpperCamelCase__: List[Any] = initializer_range
UpperCamelCase__: int = num_labels
UpperCamelCase__: Optional[Any] = num_choices
UpperCamelCase__: List[Any] = scope
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__: Optional[int] = None
if self.use_input_mask:
UpperCamelCase__: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__: Dict = None
if self.use_token_type_ids:
UpperCamelCase__: int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__: Optional[int] = None
UpperCamelCase__: Tuple = None
UpperCamelCase__: Dict = None
if self.use_labels:
UpperCamelCase__: Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__: Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__: Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
(
UpperCamelCase__
): Tuple = self.prepare_config_and_inputs()
UpperCamelCase__: Optional[Any] = True
UpperCamelCase__: Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase_ ( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[Any] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: str ):
'''simple docstring'''
UpperCamelCase__: Any = NezhaModel(config=_A )
model.to(_A )
model.eval()
UpperCamelCase__: Dict = model(_A , attention_mask=_A , token_type_ids=_A )
UpperCamelCase__: str = model(_A , token_type_ids=_A )
UpperCamelCase__: Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: Dict , __lowerCamelCase: Dict , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = True
UpperCamelCase__: Any = NezhaModel(_A )
model.to(_A )
model.eval()
UpperCamelCase__: Dict = model(
_A , attention_mask=_A , token_type_ids=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
UpperCamelCase__: Dict = model(
_A , attention_mask=_A , token_type_ids=_A , encoder_hidden_states=_A , )
UpperCamelCase__: List[Any] = model(_A , attention_mask=_A , token_type_ids=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: int , __lowerCamelCase: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = NezhaForMaskedLM(config=_A )
model.to(_A )
model.eval()
UpperCamelCase__: Dict = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: Dict , __lowerCamelCase: Dict , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: Dict , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: Tuple = NezhaForNextSentencePrediction(config=_A )
model.to(_A )
model.eval()
UpperCamelCase__: List[Any] = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: Tuple , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any ):
'''simple docstring'''
UpperCamelCase__: str = NezhaForPreTraining(config=_A )
model.to(_A )
model.eval()
UpperCamelCase__: Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , next_sentence_label=_A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Any , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: Dict = NezhaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
UpperCamelCase__: List[str] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: Dict , __lowerCamelCase: Tuple , __lowerCamelCase: Any , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = self.num_labels
UpperCamelCase__: Optional[int] = NezhaForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCamelCase__: str = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: Dict , __lowerCamelCase: List[str] ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.num_labels
UpperCamelCase__: Optional[Any] = NezhaForTokenClassification(config=_A )
model.to(_A )
model.eval()
UpperCamelCase__: List[Any] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self: str , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Tuple , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: Tuple = self.num_choices
UpperCamelCase__: List[str] = NezhaForMultipleChoice(config=_A )
model.to(_A )
model.eval()
UpperCamelCase__: Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__: Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__: List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__: str = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = self.prepare_config_and_inputs()
(
UpperCamelCase__
): int = config_and_inputs
UpperCamelCase__: Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _a ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
def UpperCAmelCase_ ( self: List[str] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: Dict=False ):
'''simple docstring'''
UpperCamelCase__: List[Any] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class in get_values(_A ):
UpperCamelCase__: str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_A )
UpperCamelCase__: str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
UpperCamelCase__: Tuple = NezhaModelTester(self )
UpperCamelCase__: str = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_A )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
(
UpperCamelCase__
): str = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__: int = None
self.model_tester.create_and_check_model_as_decoder(
_A , _A , _A , _A , _A , _A , _A , _A , _A , )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
UpperCamelCase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_A )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__: Any = NezhaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
UpperCamelCase__: Dict = True
UpperCamelCase__: int = model_class(config=_A )
UpperCamelCase__: Optional[Any] = self._prepare_for_class(_A , _A )
UpperCamelCase__: Optional[int] = torch.jit.trace(
_A , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , "bert.pt" ) )
UpperCamelCase__: int = torch.jit.load(os.path.join(_A , "bert.pt" ) , map_location=_A )
loaded(inputs_dict["input_ids"].to(_A ) , inputs_dict["attention_mask"].to(_A ) )
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
UpperCamelCase__: str = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__: Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__: Optional[Any] = model(_A , attention_mask=_A )[0]
UpperCamelCase__: Optional[int] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _A )
UpperCamelCase__: str = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
UpperCamelCase__: str = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__: Any = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__: Dict = model(_A , attention_mask=_A )[0]
UpperCamelCase__: Dict = torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , _A )
UpperCamelCase__: int = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
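

# --- Added usage note (illustrative, not part of the original test file) ---
# The __main__ block above is only meaningful under multiple ranks; assuming this module
# is saved as test_multigpu.py, a typical 2-GPU launch is:
#
#   torchrun --nproc_per_node=2 test_multigpu.py
#
# Each rank builds a tensor of a different length, and pad_across_processes must zero-pad
# all of them to the longest length across ranks.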
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
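

# --- Added usage sketch (illustrative, not part of the original file) ---
# Instantiating a small configuration; omitted arguments fall back to the defaults above.
#
# from transformers import LukeConfig
#
# config = LukeConfig(entity_vocab_size=10, entity_emb_size=32)
# print(config.model_type, config.entity_vocab_size, config.hidden_size)  # luke 10 768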
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift a node down so that the sub-heap rooted at `start` is a min-heap."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Update function used when the value of a node in the min-heap decreases."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Compute a minimum spanning tree with Prim's algorithm and return its edges."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
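

# --- Added usage sketch (illustrative, not part of the original module) ---
# Building the adjacency list in code instead of from stdin; the MST of this 4-vertex
# graph keeps the edges of weight 1, 2 and 3.
#
# demo_graph = defaultdict(list)
# for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
#     demo_graph[u].append([v, w])
#     demo_graph[v].append([u, w])
# print(prisms_algorithm(demo_graph))  # [(0, 1), (1, 2), (2, 3)]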
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowercase_ = numpy.array([0, 0])
lowercase_ = numpy.array([0.5, 0.866_0254])
lowercase_ = numpy.array([1, 0])
lowercase_ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch snowflake construction `steps` times to the initial vectors."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every line segment by four shorter segments forming a bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake outline."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
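

# --- Added sanity check (illustrative, not part of the original module) ---
# Each iteration replaces every segment with four shorter ones, so after n steps the
# initial triangle's 3 segments become 3 * 4**n:
#
# for n in range(4):
#     assert len(iterate(INITIAL_VECTORS, n)) - 1 == 3 * 4**n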
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
_A = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
_A = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(f'''https://google.com{link.get("href")}''')
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase_ = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def UpperCAmelCase__ ( self : Tuple , _A : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__SCREAMING_SNAKE_CASE : List[Any] = torch.device(F'''cuda:{gpu_id}''' )
__SCREAMING_SNAKE_CASE : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def UpperCAmelCase__ ( self : int , _A : Tuple=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
__SCREAMING_SNAKE_CASE : str = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__SCREAMING_SNAKE_CASE : Optional[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
__SCREAMING_SNAKE_CASE : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_A )
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
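# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). The checkpoint ids
# below are illustrative assumptions; running this needs diffusers, a GPU and
# network access, so it is left commented out.
#
#   from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
#
#   prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   image_embeds, negative_image_embeds = prior("a robot, 4k photo").to_tuple()
#   pipe = KandinskyV22ControlnetPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-controlnet-depth")
#   image = pipe(
#       image_embeds=image_embeds,
#       negative_image_embeds=negative_image_embeds,
#       hint=depth_hint,  # a (1, 3, H, W) depth-map tensor prepared elsewhere (hypothetical)
#       height=768,
#       width=768,
#       num_inference_steps=50,
#   ).images[0]
# ---------------------------------------------------------------------------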
| 303
| 0
|
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
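# Editor's sketch (added; not in the original script): the end-of-stream
# padding used by write_file_binary, replayed on an in-memory bit string so it
# can be inspected without touching the filesystem. The payload is arbitrary.
def _demo_final_byte_padding(bits: str = "1011001110001") -> list:
    byte_length = 8
    chunks = [bits[i : i + byte_length] for i in range(0, len(bits), byte_length)]
    if len(chunks[-1]) % byte_length == 0:
        # payload already byte-aligned: append a whole marker byte
        chunks.append("10000000")
    else:
        # otherwise mark the end with a single "1" and zero-fill the rest
        chunks[-1] += "1" + "0" * (byte_length - len(chunks[-1]) - 1)
    return chunks  # ['10110011', '10001100'] for the default payload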
| 225
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Whether the given fsspec filesystem is anything other than the local one."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str) -> None:
    """Rename a path, using an efficient local move when possible."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Release fsspec's internal asyncio state so it is safe to use after fork."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
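# Editor's usage sketch (added): the helpers above in action; the URIs are
# illustrative only.
#
#   extract_path_from_uri("s3://my-bucket/datasets/squad")  # -> "my-bucket/datasets/squad"
#   extract_path_from_uri("/local/datasets/squad")          # -> "/local/datasets/squad"
#   is_remote_filesystem(None)                              # -> False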
| 303
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : Union[str, Any] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
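# Editor's usage sketch (added): the attribute_map above lets generic config
# names resolve to DistilBERT's own; the override values are illustrative.
#
#   cfg = DistilBertConfig(dim=512, n_layers=4)
#   cfg.hidden_size        # -> 512 (mapped to `dim`)
#   cfg.num_hidden_layers  # -> 4   (mapped to `n_layers`)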
| 154
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 303
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
    _import_structure["modeling_data2vec_text"] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
    _import_structure["modeling_data2vec_vision"] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class __UpperCamelCase(PretrainedConfig):
    """Configuration for the MRA model (uw-madison/mra-base-512-4)."""

    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 303
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : List[Any] = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __UpperCAmelCase(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`'
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`'
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
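# Editor's worked example (added): with illustrative settings
# embedding_dimension=[2, 3], num_dynamic_real_features=1, num_time_features=2,
# num_static_real_features=0 and input_size=1, _number_of_features evaluates to
# (2 + 3) + 1 + 2 + 0 + 1 * 2 = 10, which is then folded into feature_size above.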
| 42
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __UpperCamelCase(DiffusionPipeline):
    """Unconditional audio generation pipeline (UNet + scheduler)."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)
        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample
        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
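# Editor's usage sketch (added): driving an unconditional audio pipeline like
# the one above. The checkpoint id is an assumption for illustration.
#
#   from diffusers import DanceDiffusionPipeline
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audio = pipe(num_inference_steps=100, audio_length_in_s=4.0).audios[0]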
| 303
| 0
|
'''simple docstring'''
from __future__ import annotations
UpperCamelCase_ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
UpperCamelCase_ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase_ = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase_ = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase_ = 1
# the cost map which pushes the path closer to the goal
UpperCamelCase_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase_ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase_ = 99
UpperCamelCase_ , UpperCamelCase_ = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
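# Editor's note (added): the heuristic built above is the Manhattan distance
# abs(i - goal[0]) + abs(j - goal[1]) with a flat 99 penalty on obstacle cells.
# With a unit step cost this never overestimates the remaining cost on free
# cells, so the first time the goal is popped the reconstructed path is optimal.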
| 309
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf config, optionally printing it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a taming VQModel from a config file and load its checkpoint."""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    """Encode an image batch to the latent grid and decode it back."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like 'pkg.mod.Class' to the object it names."""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    """Instantiate config['target'] with config['params'] as kwargs."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from config and optionally load a state dict."""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    """Load the specified checkpoint and return (model, global_step)."""
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
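# Editor's usage sketch (added): round-tripping a batch through the VQGAN
# loaded above. Paths, device and tensor shape are illustrative assumptions.
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   vqgan = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml")
#   x = torch.randn(1, 3, 256, 256, device=device)  # stand-in for a real image batch
#   x_rec = reconstruct_with_vqgan(x, vqgan)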
| 303
| 0
|
'''simple docstring'''
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 63
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class __UpperCamelCase(PretrainedConfig):
    """Configuration for the LUKE model (studio-ousia/luke-base, studio-ousia/luke-large)."""

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 303
| 0
|
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 149
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
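# Editor's note (added): with the _LazyModule replacement above, none of the
# torch/tf/flax submodules are imported at package import time; e.g.
# `from transformers import ElectraConfig` stays cheap, and touching
# `transformers.ElectraModel` is what first triggers the torch-side import.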
| 303
| 0
|
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
lowerCamelCase_ = ['''transformers''', '''torch''', '''note_seq''']
def __init__( self , *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCAmelCase_ ( cls , *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCAmelCase_ ( cls , *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 140
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase(DiffusionPipeline):
    """Class-conditional image generation pipeline (transformer + VAE + scheduler)."""

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map human-readable label(s) to the class id(s) the pipeline expects."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
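# ---------------------------------------------------------------------------
# Editor's usage sketch (added): class-conditional sampling with a DiT-style
# pipeline like the one above; the checkpoint id and labels are illustrative.
#
#   from diffusers import DiTPipeline, DPMSolverMultistepScheduler
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
#   ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=ids, num_inference_steps=25).images
# ---------------------------------------------------------------------------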
| 303
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 272
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Shortcut for AutoConfig.from_pretrained (torch.hub entry point)."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Shortcut for AutoTokenizer.from_pretrained (torch.hub entry point)."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Shortcut for AutoModel.from_pretrained (torch.hub entry point)."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Shortcut for AutoModelForCausalLM.from_pretrained (torch.hub entry point)."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Shortcut for AutoModelForMaskedLM.from_pretrained (torch.hub entry point)."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Shortcut for AutoModelForSequenceClassification.from_pretrained (torch.hub entry point)."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Shortcut for AutoModelForQuestionAnswering.from_pretrained (torch.hub entry point)."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
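# Editor's usage sketch (added): this file follows torch.hub's hubconf
# protocol, so the entry points above are reachable via torch.hub. The repo
# slug and model id below are illustrative assumptions.
#
#   import torch
#   tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")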
| 303
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 240
|
from __future__ import annotations
import numpy as np
def relu(vector):
    """Elementwise rectified linear unit: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
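if __name__ == "__main__":
    # Editor's sketch (added): np.maximum broadcasts, so relu also works
    # elementwise on matrices; the array below is an arbitrary example.
    print(relu(np.array([[-2.0, 3.5], [0.0, -0.1]])))  # --> [[0.  3.5] [0.  0. ]]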
| 303
| 0
|
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1  # clears the lowest set bit on each pass
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
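    # Editor's sketch (added): quick agreement check between the two counters
    # on a few arbitrary values, including the benchmark inputs.
    for n in (0, 1, 25, 37, 58, 2**20 - 1):
        assert (
            get_set_bits_count_using_brian_kernighans_algorithm(n)
            == get_set_bits_count_using_modulo_operator(n)
            == bin(n).count("1")
        )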
| 171
|
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 14: starting number below `limit` with the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
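# Editor's check (added): for a small, illustrative bound the answer is easy to
# verify by hand; below 10 the longest Collatz chain starts at 9 (20 terms).
#
#   assert solution(10) == 9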
| 303
| 0
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowerCamelCase__ : Any = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def UpperCAmelCase_ ( __UpperCAmelCase : Any="./ckpt/aeslc/model.ckpt-32000" ) -> Tuple:
SCREAMING_SNAKE_CASE_ = tf.train.list_variables(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(__UpperCAmelCase , desc='converting tf checkpoint to dict' ):
SCREAMING_SNAKE_CASE_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE_ = tf.train.load_variable(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
lowerCamelCase__ : int = parser.parse_args()
if args.save_dir is None:
lowerCamelCase__ : str = Path(args.tf_ckpt_path).parent.name
lowerCamelCase__ : Tuple = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
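# Editor's usage note (added): typical invocation, with an illustrative script
# name and checkpoint path (the aeslc default mirrors the one hard-coded above):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc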
| 225
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowercase_ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """Stop gradient flow to the student's positional embeddings."""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    """Stop gradient flow to the student's token-type embeddings."""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Parse arguments and run the distillation training."""
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with the `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
def odd_even_transposition(arr):
    """Sort a list in place using odd-even transposition (brick) sort."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
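    # Added sanity check (my addition, not in the original file): brick sort
    # should agree with Python's built-in sorted() on an arbitrary list.
    assert odd_even_transposition([5, 1, 4, 2, 8]) == sorted([5, 1, 4, 2, 8])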
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Add a new key to the lexicon, widening the existing codes when the index crosses a power of two."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress a string of bits using a Lempel-Ziv style growing lexicon."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file size (unary-prefixed binary) to the compressed bit string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes (with stop-bit padding) and write it to disk."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Read, compress and write the file at source_path to destination_path."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
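    # Hedged usage sketch (my addition, hypothetical file names), equivalent to
    # the CLI call above: read example.txt bit-by-bit, LZ-compress the bit
    # string, prefix the original file length, and write packed bytes out.
    # compress("example.txt", "example.lz")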
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of n via its prime factorisation: d(n) = prod(e_i + 1)."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
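    # Quick check of the divisor-counting logic (my addition): 28 = 2^2 * 7,
    # so it has (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.
    assert count_divisors(28) == 6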
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
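# Usage note (my addition): thanks to _LazyModule, a statement such as
# `from transformers import OwlViTConfig` resolves this module lazily, so
# heavy backends like torch are only imported when a torch-backed symbol
# (e.g. OwlViTModel) is first accessed.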
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowercase_ = logging.getLogger(__name__)
def main():
    """Tokenize a text file line by line and dump the resulting token id arrays to a pickle."""
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
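    # Hedged inspection sketch (my addition; the path depends on --dump_file
    # and --tokenizer_name): the pickle holds one integer array per input line.
    # with open("data/dump.bert-base-uncased.pickle", "rb") as f:
    #     sequences = pickle.load(f)
    # print(len(sequences), sequences[0][:10])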
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """Minimum cost of a down/right path from the top-left to the bottom-right cell."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
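    # Worked example (my addition, using the min_path_sum name restored above):
    # the cheapest monotone path through this grid is 1 -> 3 -> 1 -> 1 -> 1,
    # costing 7.
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7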
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
lowercase_ = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
lowercase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
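# Minimal usage sketch (my addition, using the class name restored above):
# images are expected in [-1, 1], NCHW, and at least 256 px wide; smaller
# images pass through unwatermarked.
# watermarker = StableDiffusionXLWatermarker()
# images = torch.rand(1, 3, 512, 512) * 2 - 1
# marked = watermarker.apply_watermark(images)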
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if sqrt(4 * positive_integer + 1) / 2 + 1 / 2 is a power of 2."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Find the first partition number at which the proportion of perfect partitions drops below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
            return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Shortest path on a binary grid (1 = walkable) with unit edge weights."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
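    # Small worked example (my addition, using the dijkstra name restored
    # above): 1 = walkable, 0 = blocked; every step costs 1, so the best
    # route below takes 4 moves.
    grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
    dist, path = dijkstra(grid, (0, 0), (2, 2), False)
    assert dist == 4.0 and path[0] == (0, 0) and path[-1] == (2, 2)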
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
def solution(n: int = 1000) -> int:
    """Count the first n expansions of sqrt(2) whose numerator has more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
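    # Spot check (my addition): among the first 8 expansions of sqrt(2), only
    # the 8th, 1393/985, has a numerator with more digits than its denominator.
    assert solution(8) == 1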
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
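# Note (my addition): perplexity is simply exp(mean token-level cross-entropy);
# e.g. a mean loss of 2.0 corresponds to torch.exp(torch.tensor(2.0)) ~= 7.389.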
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Check if a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
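    # Extra property check (my addition): for a Hermitian matrix the quotient
    # is real and lies between the extreme eigenvalues (here 2 and 5);
    # with v = (1, 1) it evaluates to (2 + 5) / 2 = 3.5.
    a = np.array([[2.0, 0.0], [0.0, 5.0]])
    v = np.array([[1.0], [1.0]])
    assert rayleigh_quotient(a, v).item() == 3.5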
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"
                        " model's max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
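# Usage sketch, assuming the standard pipeline factory:
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Once upon a time", max_new_tokens=20, return_full_text=False)
#   # -> [{"generated_text": "..."}]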
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
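# Usage sketch (hypothetical column name): instantiating the template with a
# custom text column and reading back the rename map applied downstream:
#   template = LanguageModeling(text_column="content")
#   template.column_mapping  # -> {"content": "text"}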
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''')
    if not scores:
        raise ValueError('''Scores cannot be empty''')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
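# Worked example: for a height-1 tree with leaves [3, 5], the maximizing player
# at the root inspects both leaves and takes the larger:
#   minimax(0, 0, True, [3, 5], 1) == max(3, 5) == 5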
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(F"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                '''Configuration for convolutional module is incorrect. '''
                '''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
                F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                F"`config.num_conv_layers = {self.num_conv_layers}`." )
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
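# Minimal usage sketch: the defaults mirror the small LibriSpeech ASR model, and
# the kernel-size check above guards the convolutional front-end, e.g.:
#   Speech2TextConfig()  # 12 encoder layers, d_model=256, two conv layers of kernel 5
#   Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5,))  # raises ValueError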
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
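# Worked trace on [4, 3, 2, 1]: the p = 2 pass merges adjacent pairs into
# [3, 4, 1, 2]; p * 2 >= len then triggers the final merge of the two sorted
# halves, giving [1, 2, 3, 4].
assert iter_merge_sort([4, 3, 2, 1]) == [1, 2, 3, 4]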
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False, ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
    def get_image_coords(self) -> torch.Tensor:
UpperCAmelCase_ : Dict = torch.arange(self.height * self.width )
UpperCAmelCase_ : int = torch.stack(
[
pixel_indices % self.width,
torch.div(_A , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
    def camera_rays(self):
UpperCAmelCase_ , *UpperCAmelCase_ : Union[str, Any] = self.shape
UpperCAmelCase_ : Optional[Any] = int(np.prod(_A ) )
UpperCAmelCase_ : Any = self.get_image_coords()
UpperCAmelCase_ : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Union[str, Any] = self.get_camera_rays(_A )
UpperCAmelCase_ : str = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : Dict = coords.view(_A , -1 , 2 )
UpperCAmelCase_ : Union[str, Any] = self.resolution()
UpperCAmelCase_ : int = self.fov()
UpperCAmelCase_ : Dict = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[int] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Any = fracs.view(_A , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(_A , 1 , 3 )
+ self.x.view(_A , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Optional[Any] = directions / directions.norm(dim=-1 , keepdim=_A )
UpperCAmelCase_ : Union[str, Any] = torch.stack(
[
torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_A , *_A , 2 , 3 )
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(), x=torch.from_numpy(np.stack(xs, axis=0)).float(), y=torch.from_numpy(np.stack(ys, axis=0)).float(), z=torch.from_numpy(np.stack(zs, axis=0)).float(), width=size, height=size, x_fov=0.7, y_fov=0.7, shape=(1, len(xs)), )
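# Usage sketch: create_pan_cameras(64) builds a ring of 20 tilted cameras around
# the origin; the `camera_rays` property then yields an (origin, direction) pair
# per pixel, shaped [1, 20 * 64 * 64, 2, 3].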
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case__ ( UpperCamelCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = '''pt'''
        self.framework_tf = '''tf'''
    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)
    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
def A ( self : Dict ) -> str:
UpperCAmelCase_ : Tuple = '''mock_framework'''
# Framework provided - return whatever the user provides
UpperCAmelCase_ : int = FeaturesManager.determine_framework(self.test_model , _A )
self.assertEqual(_A , _A )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_A )
UpperCAmelCase_ : int = FeaturesManager.determine_framework(_A , _A )
self.assertEqual(_A , _A )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_A )
UpperCAmelCase_ : Dict = FeaturesManager.determine_framework(_A , _A )
self.assertEqual(_A , _A )
def A ( self : str ) -> Optional[int]:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_A )
UpperCAmelCase_ : Dict = FeaturesManager.determine_framework(_A )
self.assertEqual(_A , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_A )
UpperCAmelCase_ : str = FeaturesManager.determine_framework(_A )
self.assertEqual(_A , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_A ):
UpperCAmelCase_ : List[Any] = FeaturesManager.determine_framework(_A )
def A ( self : int ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = MagicMock(return_value=_A )
with patch('''transformers.onnx.features.is_tf_available''' , _A ):
UpperCAmelCase_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_A , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
UpperCAmelCase_ : List[str] = MagicMock(return_value=_A )
with patch('''transformers.onnx.features.is_torch_available''' , _A ):
UpperCAmelCase_ : Tuple = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_A , self.framework_tf )
# Both in environment -> use PyTorch
UpperCAmelCase_ : Optional[int] = MagicMock(return_value=_A )
UpperCAmelCase_ : Union[str, Any] = MagicMock(return_value=_A )
with patch('''transformers.onnx.features.is_tf_available''' , _A ), patch(
'''transformers.onnx.features.is_torch_available''' , _A ):
UpperCAmelCase_ : Optional[int] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_A , self.framework_pt )
# Both not in environment -> raise error
UpperCAmelCase_ : Any = MagicMock(return_value=_A )
UpperCAmelCase_ : Union[str, Any] = MagicMock(return_value=_A )
with patch('''transformers.onnx.features.is_tf_available''' , _A ), patch(
'''transformers.onnx.features.is_torch_available''' , _A ):
with self.assertRaises(_A ):
UpperCAmelCase_ : Any = FeaturesManager.determine_framework(self.test_model )
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            encrypted = (i + k) * k
            cipher.append(encrypted)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False, ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''')
    image = Image.open(dataset[0]['''file'''])
    map = Image.open(dataset[1]['''file'''])
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''')
    image1 = Image.open(ds[0]['''file'''])
    map1 = Image.open(ds[1]['''file'''])
    image2 = Image.open(ds[2]['''file'''])
    map2 = Image.open(ds[3]['''file'''])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''center_crop''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
def A ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , _A )
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , _A )
def A ( self : Optional[Any] ) -> Any:
pass
def A ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Union[str, Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Optional[int] ) -> str:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Any ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
UpperCAmelCase_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs()
UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def A ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
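# Note on `do_reduce_labels` above: ADE20k stores background as 0, so reducing
# maps 0 -> 255 (the ignored index) and shifts real classes to k - 1, which is
# why the observed label maximum jumps from 150 to 255 in the second call.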
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class snake_case__ ( UpperCamelCase , unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : List[Any] = '''<s>'''
UpperCAmelCase_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_A ) , 10_00 )
def A ( self : Optional[int] ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def A ( self : Optional[Any] ) -> List[Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : Any = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_A )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : List[str] = tokenizer.encode(_A , add_special_tokens=_A )
UpperCAmelCase_ : int = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = tokenizer.encode(_A )
UpperCAmelCase_ : List[str] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def A ( self : Tuple , _A : Dict=15 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(_A , **_A )
# Simple input
UpperCAmelCase_ : Optional[int] = '''This is a simple input'''
UpperCAmelCase_ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : Dict = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
# Pair input
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
def A ( self : Union[str, Any] ) -> int:
pass
def A ( self : int ) -> Any:
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
UpperCAmelCase_ : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [2_85, 46, 10, 1_70, 3_82] , )
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ : List[str] = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def A ( self : List[str] ) -> Optional[int]:
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def A ( self : str ) -> str:
UpperCAmelCase_ : Tuple = '''Hello World!'''
UpperCAmelCase_ : int = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def A ( self : List[Any] ) -> str:
UpperCAmelCase_ : Tuple = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
UpperCAmelCase_ : int = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def A ( self : List[str] ) -> Optional[int]:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
UpperCAmelCase_ : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ : List[Any] = ''' '''.join(_A )
UpperCAmelCase_ : str = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' )
UpperCAmelCase_ : Any = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
UpperCAmelCase_ : List[Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
UpperCAmelCase_ : Any = encoded_sequence['''input_ids'''].shape
UpperCAmelCase_ : Optional[int] = ReformerModel(_A )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def A ( self : int ) -> Optional[Any]:
# fmt: off
UpperCAmelCase_ : int = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
UpperCAmelCase_ : Optional[Any] = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_A , sequences=_A , )
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(F".{module_name}", '''transformers.models''')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, '''__name__''', None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
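# E.g. feature_extractor_class_from_name("ViTFeatureExtractor") resolves the
# table above to transformers.models.vit and returns the class; unknown names
# fall back to registered extras, then to top-level dummy objects, then None.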
def get_feature_extractor_config(pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file, encoding='''utf-8''') as reader:
        return json.load(reader)
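# Usage sketch (hypothetical repo id): get_feature_extractor_config("user/my-model")
# returns the parsed preprocessor config as a dict, or {} (plus an info log)
# when the repo carries no such file.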
class AutoFeatureExtractor:
    def __init__(self):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
UpperCAmelCase_ : Optional[Any] = kwargs.pop('''config''' , _A )
UpperCAmelCase_ : List[Any] = kwargs.pop('''trust_remote_code''' , _A )
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = FeatureExtractionMixin.get_feature_extractor_dict(_A , **_A )
UpperCAmelCase_ : int = config_dict.get('''feature_extractor_type''' , _A )
UpperCAmelCase_ : Any = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
UpperCAmelCase_ : Dict = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_A , _A ):
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(_A , **_A )
# It could be in `config.feature_extractor_type``
UpperCAmelCase_ : Optional[Any] = getattr(_A , '''feature_extractor_type''' , _A )
if hasattr(_A , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
UpperCAmelCase_ : Optional[Any] = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
UpperCAmelCase_ : Optional[Any] = feature_extractor_class_from_name(_A )
UpperCAmelCase_ : List[str] = feature_extractor_auto_map is not None
UpperCAmelCase_ : Optional[Any] = feature_extractor_class is not None or type(_A ) in FEATURE_EXTRACTOR_MAPPING
UpperCAmelCase_ : Union[str, Any] = resolve_trust_remote_code(
_A , _A , _A , _A )
if has_remote_code and trust_remote_code:
UpperCAmelCase_ : List[str] = get_class_from_dynamic_module(
_A , _A , **_A )
UpperCAmelCase_ : List[Any] = kwargs.pop('''code_revision''' , _A )
if os.path.isdir(_A ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_A , **_A )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_A , **_A )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_A ) in FEATURE_EXTRACTOR_MAPPING:
UpperCAmelCase_ : int = FEATURE_EXTRACTOR_MAPPING[type(_A )]
return feature_extractor_class.from_dict(_A , **_A )
raise ValueError(
F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" )
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
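# Usage sketch:
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# Custom (config, extractor) pairs -- hypothetical classes here -- can be made
# discoverable with AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor).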
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    encoded = encode(input('''-> ''').strip().lower())
    print('''Encoded: ''', encoded)
    print('''Decoded:''', decode(encoded))
if __name__ == "__main__":
main()
'''simple docstring'''
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), '''num.txt''')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
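# Python integers are arbitrary precision, so the straight sum is exact and
# slicing its string form yields the leading digits, e.g.
#   str(3 * 10**49)[:10] == "3000000000"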
if __name__ == "__main__":
print(solution())
| 304
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
UpperCAmelCase_ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_A , '''num_heads''' ) )
class CvtModelTester:
def __init__( self : List[Any] , _A : List[str] , _A : Optional[Any]=13 , _A : List[str]=64 , _A : Tuple=3 , _A : int=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Union[str, Any]=[1, 2, 10] , _A : List[Any]=[7, 3, 3] , _A : Optional[Any]=[4, 2, 2] , _A : List[Any]=[2, 1, 1] , _A : Union[str, Any]=[2, 2, 2] , _A : Tuple=[False, False, True] , _A : str=[0.0, 0.0, 0.0] , _A : List[Any]=0.02 , _A : int=1e-12 , _A : Optional[int]=True , _A : List[str]=True , _A : Union[str, Any]=2 , ) -> List[Any]:
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Tuple = patch_sizes
UpperCAmelCase_ : int = patch_stride
UpperCAmelCase_ : Any = patch_padding
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : Tuple = stride_kv
UpperCAmelCase_ : Optional[Any] = depth
UpperCAmelCase_ : Dict = cls_token
UpperCAmelCase_ : Dict = attention_drop_rate
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
def A ( self : int ) -> List[str]:
UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ) -> int:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def A ( self : Dict , _A : List[Any] , _A : Tuple , _A : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : List[Any] = CvtModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Tuple = model(_A )
UpperCAmelCase_ : List[str] = (self.image_size, self.image_size)
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCAmelCase_ : int = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase_ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def A ( self : Any , _A : int , _A : str , _A : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : str = CvtForImageClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : int = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Dict ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = config_and_inputs
UpperCAmelCase_ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
a_ = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
a_ = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def A ( self : Any ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : int ) -> List[str]:
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def A ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def A ( self : Any ) -> Optional[Any]:
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def A ( self : List[Any] ) -> Any:
pass
def A ( self : int ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(_A )
UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def A ( self : Dict ) -> List[str]:
def check_hidden_states_output(_A : Dict , _A : str , _A : int ):
UpperCAmelCase_ : str = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase_ : Optional[Any] = outputs.hidden_states
UpperCAmelCase_ : Any = len(self.model_tester.depth )
self.assertEqual(len(_A ) , _A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Dict = True
check_hidden_states_output(_A , _A , _A )
def A ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : List[Any] ) -> Optional[Any]:
pass
@slow
def A ( self : Optional[int] ) -> int:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = CvtModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def __UpperCAmelCase ( ) -> str:
UpperCAmelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase):
@cached_property
def A ( self : Union[str, Any] ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A ( self : str ) -> str:
UpperCAmelCase_ : str = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_A )
UpperCAmelCase_ : Optional[int] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Any = model(**_A )
# verify the logits
UpperCAmelCase_ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
'''simple docstring'''
def __UpperCAmelCase ( A : int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
        raise ValueError('''The given input must be non-negative''' )
# get the generated string sequence
UpperCAmelCase_ : int = gray_code_sequence_string(A )
    # convert the generated gray code strings to integers
for i in range(len(A ) ):
UpperCAmelCase_ : List[str] = int(sequence[i] , 2 )
return sequence
def __UpperCAmelCase ( A : int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
UpperCAmelCase_ : Tuple = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
UpperCAmelCase_ : List[str] = gray_code_sequence_string(bit_count - 1 )
UpperCAmelCase_ : int = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
UpperCAmelCase_ : Union[str, Any] = '''0''' + smaller_sequence[i]
sequence.append(A )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
UpperCAmelCase_ : Dict = '''1''' + smaller_sequence[i]
sequence.append(A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
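# Sanity check (assuming the first function above is gray_code_sequence in the
# un-obfuscated source): for a bit count of 2 it yields [0, 1, 3, 2], i.e. the
# strings "00", "01", "11", "10" converted to integers; consecutive codes
# differ in exactly one bit, which is the defining property of a Gray code.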
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCamelCase : Union[str, Any] = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
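# In the upstream lazy-import pattern, this final assignment is what installs the
# lazy module (sys.modules[__name__] = _LazyModule(...)), so the heavy torch
# imports above only run when an attribute such as MegaModel is first accessed.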
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
_UpperCamelCase : Any = logging.getLogger(__name__)
class snake_case__ ( UpperCamelCase):
a_ = "masked_bert"
def __init__( self : str , _A : Dict=3_05_22 , _A : Dict=7_68 , _A : Union[str, Any]=12 , _A : str=12 , _A : str=30_72 , _A : Dict="gelu" , _A : int=0.1 , _A : Optional[Any]=0.1 , _A : Any=5_12 , _A : Union[str, Any]=2 , _A : Union[str, Any]=0.02 , _A : int=1e-12 , _A : Any=0 , _A : Any="topK" , _A : List[str]="constant" , _A : Dict=0.0 , **_A : int , ) -> Union[str, Any]:
super().__init__(pad_token_id=_A , **_A )
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : Optional[int] = pruning_method
UpperCAmelCase_ : Optional[int] = mask_init
UpperCAmelCase_ : List[Any] = mask_scale
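# Minimal usage sketch (assuming the class above is MaskedBertConfig in the
# un-obfuscated source; keyword names follow the __init__ signature above):
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#   config.save_pretrained("./masked-bert")  # standard PretrainedConfig serialization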
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def __UpperCAmelCase ( A : Optional[int] ) -> Dict:
UpperCAmelCase_ : Optional[int] = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
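# The helper above returns True when every tensor in the list has the same shape;
# note that for a single-element list, shapes[1:] is empty and all() is vacuously True.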
class snake_case__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = StableDiffusionLatentUpscalePipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
a_ = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
a_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a_ = frozenset([])
a_ = True
@property
def A ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Optional[int] = 4
UpperCAmelCase_ : List[Any] = (16, 16)
UpperCAmelCase_ : Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_A )
return image
def A ( self : Optional[Any] ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_A , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_A , only_cross_attention=_A , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
UpperCAmelCase_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
UpperCAmelCase_ : Optional[Any] = EulerDiscreteScheduler(prediction_type='''sample''' )
UpperCAmelCase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
UpperCAmelCase_ : List[str] = CLIPTextModel(_A )
UpperCAmelCase_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Union[str, Any] = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def A ( self : Any , _A : Tuple , _A : Any=0 ) -> Optional[Any]:
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : List[Any] ) -> Dict:
UpperCAmelCase_ : Optional[Any] = '''cpu'''
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : List[Any] = self.get_dummy_inputs(_A )
UpperCAmelCase_ : Dict = pipe(**_A ).images
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
UpperCAmelCase_ : List[Any] = np.array(
[0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055] )
UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
def A ( self : str ) -> Optional[int]:
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def A ( self : str ) -> Any:
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def A ( self : Union[str, Any] ) -> Union[str, Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def A ( self : Any ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def A ( self : List[Any] ) -> str:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def A ( self : List[str] ) -> Optional[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def A ( self : Any ) -> int:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def A ( self : Any ) -> Any:
UpperCAmelCase_ : List[Any] = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Dict = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A )
UpperCAmelCase_ : int = 2
UpperCAmelCase_ : Optional[Any] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # skip schedulers that are not compatible with the latent upscaler pipeline
continue
UpperCAmelCase_ : List[Any] = getattr(_A , scheduler_enum.name )
UpperCAmelCase_ : List[Any] = scheduler_cls.from_config(pipe.scheduler.config )
UpperCAmelCase_ : Any = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class snake_case__ ( unittest.TestCase):
def A ( self : List[Any] ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
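    # The two slow tests below cover both input modes: the first feeds latents
    # straight from a base pipeline (output_type="latent") into the x2 latent
    # upscaler; the second passes a regular PIL image instead.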
def A ( self : Optional[int] ) -> Dict:
UpperCAmelCase_ : Any = torch.manual_seed(33 )
UpperCAmelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
UpperCAmelCase_ : str = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
UpperCAmelCase_ : Union[str, Any] = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
UpperCAmelCase_ : Tuple = pipe(_A , generator=_A , output_type='''latent''' ).images
UpperCAmelCase_ : Optional[int] = upscaler(
prompt=_A , image=_A , num_inference_steps=20 , guidance_scale=0 , generator=_A , output_type='''np''' , ).images[0]
UpperCAmelCase_ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def A ( self : int ) -> str:
UpperCAmelCase_ : Optional[int] = torch.manual_seed(33 )
UpperCAmelCase_ : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
UpperCAmelCase_ : List[str] = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
UpperCAmelCase_ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
UpperCAmelCase_ : Union[str, Any] = upscaler(
prompt=_A , image=_A , num_inference_steps=20 , guidance_scale=0 , generator=_A , output_type='''np''' , ).images[0]
UpperCAmelCase_ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = StableDiffusionDiffEditPipeline
a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
a_ = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a_ = frozenset([])
def A ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A ( self : str , _A : List[str] , _A : Any=0 ) -> str:
UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Any = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : str = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : int = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Dict = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[Any] = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any:
UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[int] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : List[str] ) -> Optional[Any]:
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Any = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A )
UpperCAmelCase_ : str = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A )
UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0]
UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max()
self.assertLess(_A , 1e-4 )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : Optional[Any] = '''cpu'''
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A )
UpperCAmelCase_ : int = pipe.generate_mask(**_A )
UpperCAmelCase_ : Tuple = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase_ : List[Any] = np.array([0] * 9 )
UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def A ( self : str ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = '''cpu'''
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : str = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : int = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
def A ( self : Tuple ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : Any = '''cpu'''
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A )
UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A )
UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : List[Any] = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
@require_torch_gpu
@slow
class snake_case__ ( unittest.TestCase):
def A ( self : Optional[Any] ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def A ( cls : Dict ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
UpperCAmelCase_ : Any = raw_image
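    # DiffEdit proceeds in three stages, mirrored by the calls below: (1) generate
    # a mask from source/target prompts, (2) invert the image to latents with the
    # inverse scheduler, (3) denoise with the mask and inverted latents.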
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : int = torch.manual_seed(0 )
UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Tuple = '''a bowl of pears'''
UpperCAmelCase_ : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[str] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents
UpperCAmelCase_ : Any = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : str = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
def A ( self : Tuple ) -> List[str]:
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Dict = '''a bowl of pears'''
UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[Any] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents
UpperCAmelCase_ : Dict = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : Tuple = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
'''simple docstring'''
def __UpperCAmelCase ( A : int = 1_0_0_0 ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = 1, 1
UpperCAmelCase_ : Dict = []
for i in range(1 , n + 1 ):
UpperCAmelCase_ : Optional[int] = prev_numerator + 2 * prev_denominator
UpperCAmelCase_ : Tuple = prev_numerator + prev_denominator
if len(str(A ) ) > len(str(A ) ):
result.append(A )
UpperCAmelCase_ : Optional[Any] = numerator
UpperCAmelCase_ : Optional[int] = denominator
return len(A )
if __name__ == "__main__":
print(f'''{solution() = }''')
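# Worked example (Project Euler 57): the sqrt(2) expansions run 3/2, 7/5, 17/12,
# 41/29, 99/70, 239/169, 577/408, 1393/985, ...; 1393/985 is the first fraction
# whose numerator has more digits than its denominator, so in the un-obfuscated
# source solution(8) returns 1 and solution(1000) counts all such expansions.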
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class snake_case__ ( UpperCamelCase):
def __init__( self : Optional[int] , *_A : Tuple , _A : Optional[Any]=None , _A : Union[str, Any]=None , **_A : Dict ) -> int:
super().__init__(*_A , **_A )
UpperCAmelCase_ : Optional[Any] = eval_examples
UpperCAmelCase_ : Dict = post_process_function
def A ( self : Optional[Any] , _A : str=None , _A : Optional[Any]=None , _A : List[str]=None , _A : str = "eval" ) -> List[str]:
UpperCAmelCase_ : Any = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase_ : Optional[Any] = self.get_eval_dataloader(_A )
UpperCAmelCase_ : str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_ : Optional[int] = self.compute_metrics
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase_ : Union[str, Any] = time.time()
try:
UpperCAmelCase_ : Optional[int] = eval_loop(
_A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , )
finally:
UpperCAmelCase_ : Tuple = compute_metrics
UpperCAmelCase_ : int = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
_A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCAmelCase_ : Optional[Any] = self.post_process_function(_A , _A , output.predictions )
UpperCAmelCase_ : Tuple = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
UpperCAmelCase_ : int = metrics.pop(_A )
metrics.update(output.metrics )
else:
UpperCAmelCase_ : Dict = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase_ : Union[str, Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A )
return metrics
def A ( self : List[str] , _A : Optional[Any] , _A : List[str] , _A : Tuple=None , _A : str = "test" ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.get_test_dataloader(_A )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_ : Dict = self.compute_metrics
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase_ : Dict = time.time()
try:
UpperCAmelCase_ : Tuple = eval_loop(
_A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , )
finally:
UpperCAmelCase_ : Any = compute_metrics
UpperCAmelCase_ : List[str] = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
_A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase_ : Optional[int] = self.post_process_function(_A , _A , output.predictions , '''predict''' )
UpperCAmelCase_ : Any = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
UpperCAmelCase_ : List[str] = metrics.pop(_A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
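# Hypothetical usage sketch (names below are assumptions, not defined in this
# file): the subclass is typically wired up like the question-answering example
# trainers, e.g.
#   trainer = QuestionAnsweringTrainer(model=model, args=training_args,
#                                      eval_dataset=eval_dataset, eval_examples=eval_examples,
#                                      post_process_function=post_processing_function)
#   metrics = trainer.evaluate()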
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase)
class snake_case__ ( UpperCamelCase):
a_ = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True})
a_ = Features({"text": Value("string")})
a_ = Features({})
a_ = "text"
@property
def A ( self : List[str] ) -> Dict[str, str]:
return {self.text_column: "text"}
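# Minimal sketch (assuming the dataclass above is datasets' LanguageModeling task
# template): the column_mapping property tells prepare_for_task how to rename the
# dataset's text column to the canonical "text" column, e.g.
#   dataset.prepare_for_task("language-modeling")  # hypothetical usage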
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
_UpperCamelCase : Any = 5
_UpperCamelCase : Tuple = 10
@require_sentencepiece
@require_tokenizers
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = SpeechaTextTokenizer
a_ = False
a_ = True
def A ( self : str ) -> Any:
super().setUp()
UpperCAmelCase_ : Optional[Any] = sp.SentencePieceProcessor()
spm_model.Load(_A )
UpperCAmelCase_ : Tuple = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>''']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_A ) )]
UpperCAmelCase_ : List[str] = dict(zip(_A , range(len(_A ) ) ) )
UpperCAmelCase_ : str = Path(self.tmpdirname )
save_json(_A , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_A , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
UpperCAmelCase_ : str = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : Dict ) -> str:
UpperCAmelCase_ : List[Any] = '''<pad>'''
UpperCAmelCase_ : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def A ( self : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_A ) , 10_01 )
def A ( self : List[Any] ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def A ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [2_89, 50, 14, 1_74, 3_86] , )
UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(_A , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def A ( self : Optional[int] ) -> Tuple:
# fmt: off
UpperCAmelCase_ : List[Any] = {'''input_ids''': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class snake_case__ ( unittest.TestCase):
a_ = "valhalla/s2t_mustc_multilinguial_medium"
a_ = "C'est trop cool"
a_ = "Esto es genial"
@classmethod
def A ( cls : str ) -> Tuple:
UpperCAmelCase_ : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def A ( self : Union[str, Any] ) -> Tuple:
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11 )
def A ( self : Union[str, Any] ) -> Optional[Any]:
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def A ( self : int ) -> Optional[Any]:
self.assertIn(_A , self.tokenizer.all_special_ids )
UpperCAmelCase_ : Union[str, Any] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
UpperCAmelCase_ : Dict = self.tokenizer.decode(_A , skip_special_tokens=_A )
UpperCAmelCase_ : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
self.assertNotIn(self.tokenizer.eos_token , _A )
def A ( self : int ) -> str:
UpperCAmelCase_ : List[Any] = '''fr'''
UpperCAmelCase_ : Dict = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , _A )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def A ( self : Any ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = '''fr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
UpperCAmelCase_ : Union[str, Any] = '''es'''
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
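# Builds a metadata dict from a class-info JSON on the Hub: one entry per class
# id mapping to its name, plus the collected "thing" ids and all class names.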
def __UpperCAmelCase ( A : int , A : Any="shi-labs/oneformer_demo" ) -> Dict:
with open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) as f:
UpperCAmelCase_ : Union[str, Any] = json.load(A )
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : str = []
for key, info in class_info.items():
UpperCAmelCase_ : Tuple = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(A ) )
UpperCAmelCase_ : Any = thing_ids
UpperCAmelCase_ : Union[str, Any] = class_names
return metadata
class snake_case__ ( unittest.TestCase):
def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Optional[int] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
UpperCAmelCase_ : int = do_normalize
UpperCAmelCase_ : List[Any] = image_mean
UpperCAmelCase_ : Dict = image_std
UpperCAmelCase_ : str = class_info_file
UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A )
UpperCAmelCase_ : Tuple = num_text
UpperCAmelCase_ : Union[str, Any] = repo_path
# for the post_process_functions
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : Dict = 10
UpperCAmelCase_ : int = 10
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Union[str, Any] = do_reduce_labels
UpperCAmelCase_ : str = ignore_index
def A ( self : Dict ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A ( self : Any , _A : List[Any] , _A : List[str]=False ) -> Optional[Any]:
if not batched:
UpperCAmelCase_ : Any = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2]
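            # shortest-edge resize: scale so that the shorter side equals
            # size["shortest_edge"] while preserving the aspect ratio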
if w < h:
UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase_ : int = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ : List[Any] = self.size['''shortest_edge''']
UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase_ : Dict = self.size['''shortest_edge''']
UpperCAmelCase_ : str = self.size['''shortest_edge''']
else:
UpperCAmelCase_ : Dict = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ : int = max(_A , key=lambda _A : item[0] )[0]
UpperCAmelCase_ : List[str] = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
def A ( self : Tuple ) -> str:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ = image_processing_class
def A ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : int = OneFormerImageProcessorTester(self )
@property
def A ( self : Any ) -> int:
return self.image_processing_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''ignore_index''' ) )
self.assertTrue(hasattr(_A , '''class_info_file''' ) )
self.assertTrue(hasattr(_A , '''num_text''' ) )
self.assertTrue(hasattr(_A , '''repo_path''' ) )
self.assertTrue(hasattr(_A , '''metadata''' ) )
self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )
def A ( self : Dict ) -> Dict:
pass
def A ( self : Tuple ) -> Dict:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : int = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Dict ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Optional[int] = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : int , _A : Any=False , _A : List[Any]=False , _A : Any="np" ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
if with_segmentation_maps:
UpperCAmelCase_ : Any = num_labels
if is_instance_map:
UpperCAmelCase_ : Any = list(range(_A ) ) * 2
UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) )
UpperCAmelCase_ : Dict = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations]
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , )
return inputs
def A ( self : int ) -> str:
pass
def A ( self : Tuple ) -> Union[str, Any]:
def common(_A : Optional[int]=False , _A : str=None ):
UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs(
with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A )
UpperCAmelCase_ : List[Any] = inputs['''mask_labels''']
UpperCAmelCase_ : Optional[Any] = inputs['''class_labels''']
UpperCAmelCase_ : int = inputs['''pixel_values''']
UpperCAmelCase_ : Tuple = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_A , _A , _A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_A ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_A )
common(is_instance_map=_A , segmentation_type='''pil''' )
common(is_instance_map=_A , segmentation_type='''pil''' )
def A ( self : List[Any] ) -> List[Any]:
UpperCAmelCase_ : int = np.zeros((20, 50) )
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : List[Any] = binary_mask_to_rle(_A )
self.assertEqual(len(_A ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
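        # Sanity note on the encoding: binary_mask_to_rle flattens the mask and returns
        # alternating (1-based start index, run length) pairs for the foreground runs, so a
        # length-4 RLE means two runs of ones, the first starting at flat index 21 and
        # spanning 45 pixels.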
def A ( self : Any ) -> List[Any]:
UpperCAmelCase_ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCAmelCase_ : Union[str, Any] = image_processor.post_process_semantic_segmentation(_A )
self.assertEqual(len(_A ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase_ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        UpperCAmelCase_ : Any = image_processor.post_process_semantic_segmentation(_A , target_sizes=_A )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_ : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_panoptic_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 304
| 1
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class snake_case__ ( UpperCamelCase):
a_ = 42
a_ = 42
class snake_case__ ( UpperCamelCase , UpperCamelCase):
a_ = 1
@register_to_config
def __init__( self : Optional[Any] , _A : int = 20_00 , _A : float = 0.15 , _A : float = 0.01 , _A : float = 1_348.0 , _A : float = 1e-5 , _A : int = 1 , ) -> int:
# standard deviation of the initial noise distribution
UpperCAmelCase_ : Any = sigma_max
# setable values
UpperCAmelCase_ : Optional[int] = None
self.set_sigmas(_A , _A , _A , _A )
def A ( self : int , _A : torch.FloatTensor , _A : Optional[int] = None ) -> torch.FloatTensor:
return sample
def A ( self : str , _A : int , _A : float = None , _A : Union[str, torch.device] = None ) -> Any:
UpperCAmelCase_ : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
UpperCAmelCase_ : Optional[Any] = torch.linspace(1 , _A , _A , device=_A )
def A ( self : Any , _A : int , _A : float = None , _A : float = None , _A : float = None ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = sigma_min if sigma_min is not None else self.config.sigma_min
UpperCAmelCase_ : Union[str, Any] = sigma_max if sigma_max is not None else self.config.sigma_max
UpperCAmelCase_ : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_A , _A )
UpperCAmelCase_ : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCAmelCase_ : List[str] = torch.exp(torch.linspace(math.log(_A ) , math.log(_A ) , _A ) )
UpperCAmelCase_ : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def A ( self : Any , _A : int , _A : Dict ) -> Dict:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def A ( self : List[str] , _A : torch.FloatTensor , _A : int , _A : torch.FloatTensor , _A : Optional[torch.Generator] = None , _A : bool = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
UpperCAmelCase_ : Any = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
UpperCAmelCase_ : Tuple = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be on the same device, so we use cpu, as is the default with cuda
UpperCAmelCase_ : List[Any] = timesteps.to(self.discrete_sigmas.device )
UpperCAmelCase_ : List[Any] = self.discrete_sigmas[timesteps].to(sample.device )
UpperCAmelCase_ : List[str] = self.get_adjacent_sigma(_A , _A ).to(sample.device )
UpperCAmelCase_ : Any = torch.zeros_like(_A )
UpperCAmelCase_ : str = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
UpperCAmelCase_ : Optional[Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
UpperCAmelCase_ : List[str] = diffusion.unsqueeze(-1 )
UpperCAmelCase_ : List[str] = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of the reverse SDE
UpperCAmelCase_ : Tuple = randn_tensor(
sample.shape , layout=sample.layout , generator=_A , device=sample.device , dtype=sample.dtype )
UpperCAmelCase_ : Optional[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
UpperCAmelCase_ : str = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_A , prev_sample_mean=_A )
def A ( self : Union[str, Any] , _A : torch.FloatTensor , _A : torch.FloatTensor , _A : Optional[torch.Generator] = None , _A : bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
UpperCAmelCase_ : Tuple = randn_tensor(sample.shape , layout=sample.layout , generator=_A ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
UpperCAmelCase_ : Dict = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
UpperCAmelCase_ : Union[str, Any] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
UpperCAmelCase_ : Union[str, Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
UpperCAmelCase_ : Dict = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
UpperCAmelCase_ : List[Any] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
UpperCAmelCase_ : Dict = step_size.unsqueeze(-1 )
UpperCAmelCase_ : int = sample + step_size * model_output
UpperCAmelCase_ : Optional[Any] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_A )
def A ( self : Union[str, Any] , _A : torch.FloatTensor , _A : torch.FloatTensor , _A : torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ : Dict = timesteps.to(original_samples.device )
UpperCAmelCase_ : Optional[int] = self.discrete_sigmas.to(original_samples.device )[timesteps]
UpperCAmelCase_ : int = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_A ) * sigmas[:, None, None, None]
)
UpperCAmelCase_ : Tuple = noise + original_samples
return noisy_samples
def __len__( self : Optional[Any] ) -> List[str]:
return self.config.num_train_timesteps
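# Minimal usage sketch (not part of the scheduler above) of the predictor-corrector loop
# this class is built for. `score_model(sample, sigma)` is a hypothetical stand-in for a
# trained score network; the `timesteps`/`sigmas` attributes and `step_correct`/`step_pred`
# methods are the usual diffusers names for what the obfuscated definitions above assign,
# so treat those names as assumptions.
def sde_ve_sampling_sketch(scheduler, score_model, shape, num_inference_steps=2000, correct_steps=1):
    sample = torch.randn(shape) * scheduler.config.sigma_max  # draw from the prior p_T
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)
    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])
        for _ in range(correct_steps):  # corrector: Langevin refinement at a fixed noise level
            score = score_model(sample, sigma_t)
            sample = scheduler.step_correct(score, sample).prev_sample
        score = score_model(sample, sigma_t)  # predictor: one reverse-SDE step
        sample = scheduler.step_pred(score, t, sample).prev_sample
    return sample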
| 304
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCamelCase : Optional[int] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : List[str] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_UpperCamelCase : Tuple = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_UpperCamelCase : str = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_UpperCamelCase : Optional[int] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_UpperCamelCase : List[str] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def __UpperCAmelCase ( A : Optional[int] ) -> int:
UpperCAmelCase_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , A )
return [m.group(0 ) for m in matches]
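# Example (the obfuscated helper above corresponds to `camel_case_split`, the name the code
# below calls it by): camel_case_split("TFBertForMaskedLM") -> ["TF", "Bert", "For", "Masked", "LM"];
# the regex splits at lowercase-to-uppercase boundaries and before the last capital of an
# acronym (e.g. the F|B boundary in "TFBert").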
def __UpperCAmelCase ( ) -> str:
UpperCAmelCase_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCAmelCase_ : Optional[Any] = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
UpperCAmelCase_ : Dict = collections.defaultdict(A )
UpperCAmelCase_ : str = collections.defaultdict(A )
UpperCAmelCase_ : int = collections.defaultdict(A )
    # Let's look through all transformers objects (once) and find out if models are supported by a given backend.
for attr_name in dir(A ):
UpperCAmelCase_ : int = None
if _re_tf_models.match(A ) is not None:
UpperCAmelCase_ : Optional[Any] = tf_models
UpperCAmelCase_ : Optional[int] = _re_tf_models.match(A ).groups()[0]
elif _re_flax_models.match(A ) is not None:
UpperCAmelCase_ : int = flax_models
UpperCAmelCase_ : Any = _re_flax_models.match(A ).groups()[0]
elif _re_pt_models.match(A ) is not None:
UpperCAmelCase_ : Union[str, Any] = pt_models
UpperCAmelCase_ : List[Any] = _re_pt_models.match(A ).groups()[0]
if lookup_dict is not None:
while len(A ) > 0:
if attr_name in model_prefix_to_model_type:
UpperCAmelCase_ : Optional[int] = True
break
# Try again after removing the last word in the name
UpperCAmelCase_ : List[Any] = ''''''.join(camel_case_split(A )[:-1] )
UpperCAmelCase_ : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
UpperCAmelCase_ : List[Any] = list(A )
all_models.sort()
UpperCAmelCase_ : Dict = {'''model_type''': all_models}
UpperCAmelCase_ : Tuple = [pt_models[t] for t in all_models]
UpperCAmelCase_ : Dict = [tf_models[t] for t in all_models]
UpperCAmelCase_ : Optional[int] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure each model type gets the right default processing class.
UpperCAmelCase_ : int = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
UpperCAmelCase_ : Any = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
UpperCAmelCase_ : Union[str, Any] = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
UpperCAmelCase_ : int = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
UpperCAmelCase_ : Dict = '''AutoTokenizer'''
UpperCAmelCase_ : str = [processors[t] for t in all_models]
return pd.DataFrame(A )
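# The frame built above has one row per model type: the "model_type" column plus boolean
# PyTorch/TensorFlow/Flax support flags and the preferred processing class (AutoProcessor,
# AutoTokenizer, or AutoFeatureExtractor). The exact column names are hidden by the
# obfuscated assignments, so treat them as assumptions about the upstream script.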
def __UpperCAmelCase ( A : Optional[int] ) -> str:
UpperCAmelCase_ : int = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
UpperCAmelCase_ : Tuple = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
UpperCAmelCase_ : Tuple = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(A , A , A ):
# The type of pipeline may not exist in this framework
if not hasattr(A , A ):
continue
# First extract all model_names
UpperCAmelCase_ : List[str] = []
for name in getattr(A , A ).values():
if isinstance(A , A ):
model_names.append(A )
else:
model_names.extend(list(A ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __UpperCAmelCase ( A : int , A : Any ) -> Tuple:
UpperCAmelCase_ : Tuple = get_frameworks_table()
UpperCAmelCase_ : Any = Dataset.from_pandas(A )
UpperCAmelCase_ : str = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=A )
UpperCAmelCase_ : Union[str, Any] = Dataset.from_json(A )
UpperCAmelCase_ : Optional[int] = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(A ) )
}
UpperCAmelCase_ : str = update_pipeline_and_auto_class_table(A )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
UpperCAmelCase_ : Union[str, Any] = sorted(table.keys() )
UpperCAmelCase_ : Optional[Any] = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
UpperCAmelCase_ : Dict = Dataset.from_pandas(A )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(A , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(A , '''pipeline_tags.json''' ) )
if commit_sha is not None:
UpperCAmelCase_ : List[str] = (
F"Update with commit {commit_sha}\n\nSee: "
F"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
UpperCAmelCase_ : int = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=A , repo_type='''dataset''' , token=A , commit_message=A , )
def __UpperCAmelCase ( ) -> int:
UpperCAmelCase_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
UpperCAmelCase_ : List[str] = transformers_module.pipelines.SUPPORTED_TASKS
UpperCAmelCase_ : List[str] = []
for key in pipeline_tasks:
if key not in in_table:
UpperCAmelCase_ : Optional[Any] = pipeline_tasks[key]['''pt''']
if isinstance(A , (list, tuple) ):
UpperCAmelCase_ : Dict = model[0]
UpperCAmelCase_ : Any = model.__name__
if model not in in_table.values():
missing.append(A )
if len(A ) > 0:
UpperCAmelCase_ : List[Any] = ''', '''.join(A )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
_UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_UpperCamelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 304
| 1
|
'''simple docstring'''
from __future__ import annotations
_UpperCamelCase : Tuple = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_UpperCamelCase : str = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __UpperCAmelCase ( A : list[float] ) -> list[float]:
UpperCAmelCase_ : str = []
UpperCAmelCase_ : List[str] = len(A )
for i in range(A ):
UpperCAmelCase_ : float = -1
for j in range(i + 1 , A ):
if arr[i] < arr[j]:
UpperCAmelCase_ : List[str] = arr[j]
break
result.append(A )
return result
def __UpperCAmelCase ( A : list[float] ) -> list[float]:
UpperCAmelCase_ : str = []
for i, outer in enumerate(A ):
UpperCAmelCase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
UpperCAmelCase_ : Any = inner
break
result.append(A )
return result
def __UpperCAmelCase ( A : list[float] ) -> list[float]:
UpperCAmelCase_ : int = len(A )
UpperCAmelCase_ : list[float] = []
UpperCAmelCase_ : list[float] = [-1] * arr_size
for index in reversed(range(A ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
UpperCAmelCase_ : Any = stack[-1]
stack.append(arr[index] )
return result
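# Illustrative trace of the right-to-left stack pass above on arr = [2, 7, 3, 5]:
# index 3 (5): stack empty -> -1, push 5; index 2 (3): top 5 > 3 -> 5, push 3;
# index 1 (7): pop 3 and 5 (both <= 7), stack empties -> -1, push 7;
# index 0 (2): top 7 > 2 -> 7. Result: [7, -1, 5, -1]; each element is pushed and
# popped at most once, giving O(n) overall.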
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_UpperCamelCase : Any = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 304
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
_UpperCamelCase : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_UpperCamelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase)} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."})
a_ = field(default=UpperCamelCase , metadata={"help": "Whether ot not to use whole word mask."})
a_ = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
a_ = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a_ = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."})
a_ = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
def __UpperCAmelCase ( A : DataTrainingArguments , A : PreTrainedTokenizer , A : bool = False , A : Optional[str] = None , ) -> List[Any]:
def _dataset(A : Dict , A : str=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=A , file_path=A , block_size=args.block_size , ref_path=A , )
return LineByLineTextDataset(tokenizer=A , file_path=A , block_size=args.block_size )
else:
return TextDataset(
tokenizer=A , file_path=A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=A , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(A ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def __UpperCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCAmelCase_ : List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
UpperCAmelCase_ : int = AutoModelWithLMHead.from_config(A )
model.resize_token_embeddings(len(A ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
UpperCAmelCase_ : List[str] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCAmelCase_ : Dict = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCAmelCase_ : str = (
get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCAmelCase_ : Any = (
get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCAmelCase_ : Optional[int] = DataCollatorForPermutationLanguageModeling(
tokenizer=A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCAmelCase_ : Tuple = DataCollatorForWholeWordMask(
tokenizer=A , mlm_probability=data_args.mlm_probability )
else:
UpperCAmelCase_ : List[str] = DataCollatorForLanguageModeling(
tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase_ : Any = Trainer(
model=A , args=A , data_collator=A , train_dataset=A , eval_dataset=A , prediction_loss_only=A , )
# Training
if training_args.do_train:
UpperCAmelCase_ : List[str] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=A )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase_ : Tuple = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase_ : Dict = trainer.evaluate()
UpperCAmelCase_ : Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
UpperCAmelCase_ : Optional[int] = {'''perplexity''': perplexity}
UpperCAmelCase_ : int = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(A , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , A , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(A )
return results
def __UpperCAmelCase ( A : Tuple ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 304
| 1
|
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( A : list[float] , A : Optional[Any] ) -> int:
print(F"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(A ):
print(F"{i}\t\t{d}" )
def __UpperCAmelCase ( A : list[dict[str, int]] , A : list[float] , A : int ) -> List[str]:
for j in range(A ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __UpperCAmelCase ( A : list[dict[str, int]] , A : int , A : int , A : int ) -> list[float]:
UpperCAmelCase_ : Tuple = [float('''inf''' )] * vertex_count
UpperCAmelCase_ : Union[str, Any] = 0.0
for _ in range(vertex_count - 1 ):
for j in range(A ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
UpperCAmelCase_ : Tuple = distance[u] + w
UpperCAmelCase_ : Dict = check_negative_cycle(A , A , A )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
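# Minimal non-interactive usage sketch (hypothetical graph; the __main__ block below
# builds the same structure from stdin):
#     graph = [{"src": 0, "dst": 1, "weight": 4},
#              {"src": 0, "dst": 2, "weight": 1},
#              {"src": 2, "dst": 1, "weight": 2}]
#     bellman_ford(graph, 3, 3, 0)  # -> [0.0, 3.0, 1.0]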
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase : Union[str, Any] = int(input('Enter number of vertices: ').strip())
_UpperCamelCase : Union[str, Any] = int(input('Enter number of edges: ').strip())
_UpperCamelCase : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : str = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
_UpperCamelCase : Optional[int] = {'src': src, 'dst': dest, 'weight': weight}
_UpperCamelCase : Dict = int(input('\nEnter shortest path source:').strip())
_UpperCamelCase : Any = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 304
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class snake_case__ ( unittest.TestCase):
@classmethod
def A ( cls : Optional[int] ) -> Tuple:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(_A )
@classmethod
def A ( cls : int ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : List[str] = FlaxBertModel(_A )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def __UpperCAmelCase ( modela : Union[str, Any] , modelb : Optional[int] ) -> List[Any]:
    UpperCAmelCase_ : Optional[int] = True
    UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params )
    UpperCAmelCase_ : str = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
            UpperCAmelCase_ : int = False
    return models_are_equal
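# Used by the subfolder tests below: two Flax models count as equal only when every
# parameter tensor matches within 1e-4 summed absolute difference.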
@require_flax
class snake_case__ ( unittest.TestCase):
def A ( self : Any ) -> Any:
UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Any = FlaxBertModel(_A )
UpperCAmelCase_ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) )
with self.assertRaises(_A ):
UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Tuple:
UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Tuple = FlaxBertModel(_A )
UpperCAmelCase_ : str = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' )
with self.assertRaises(_A ):
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Optional[int]:
UpperCAmelCase_ : int = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
| 304
| 1
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case__ ( unittest.TestCase):
def A ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : List[str] = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
UpperCAmelCase_ : List[Any] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
UpperCAmelCase_ : List[str] = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
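        # Why five survivors per row: top_k=10 first keeps the ten largest logits, then
        # top_p=0.6 keeps only the highest-probability tokens whose cumulative softmax
        # mass covers 0.6 (never fewer than min_tokens_to_keep=4); per the inline
        # comments above, that is the five highest values in each row.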
UpperCAmelCase_ : Union[str, Any] = tf_top_k_top_p_filtering(_A , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
UpperCAmelCase_ : Optional[int] = output[output != -float('''inf''' )]
UpperCAmelCase_ : Optional[Any] = tf.cast(
tf.where(tf.not_equal(_A , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_A , _A , rtol=1e-12 )
tf.debugging.assert_equal(_A , _A )
@require_tf
class snake_case__ ( unittest.TestCase , UpperCamelCase):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
a_ = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def A ( self : List[str] ) -> int:
# TF-only test: tf.saved_model export
UpperCAmelCase_ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Any = 2
class snake_case__ ( tf.Module):
def __init__( self : Optional[int] , _A : Dict ) -> List[Any]:
super(_A , self ).__init__()
UpperCAmelCase_ : Optional[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_A , )
def A ( self : int , _A : Optional[Any] , _A : int ) -> str:
UpperCAmelCase_ : int = self.model.generate(
input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , )
return {"sequences": outputs["sequences"]}
UpperCAmelCase_ : str = [[2, 0], [1_02, 1_03]]
UpperCAmelCase_ : Optional[int] = [[1, 0], [1, 1]]
UpperCAmelCase_ : Tuple = DummyModel(model=_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_A , _A , signatures={'''serving_default''': dummy_model.serving} )
UpperCAmelCase_ : Union[str, Any] = tf.saved_model.load(_A ).signatures['''serving_default''']
for batch_size in range(1 , len(_A ) + 1 ):
UpperCAmelCase_ : Optional[Any] = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
UpperCAmelCase_ : Any = serving_func(**_A )['''sequences''']
UpperCAmelCase_ : int = test_model.generate(**_A , max_new_tokens=_A )
tf.debugging.assert_equal(_A , _A )
@slow
def A ( self : Tuple ) -> Optional[int]:
# TF-only test: tf.saved_model export
UpperCAmelCase_ : Any = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Union[str, Any] = 2
class snake_case__ ( tf.Module):
def __init__( self : Union[str, Any] , _A : Optional[int] ) -> Dict:
super(_A , self ).__init__()
UpperCAmelCase_ : List[str] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_A , )
def A ( self : List[str] , _A : str , _A : Dict ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = self.model.generate(
input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , )
return {"sequences": outputs["sequences"]}
UpperCAmelCase_ : Optional[Any] = [[2], [1_02, 1_03]]
UpperCAmelCase_ : Optional[Any] = [[1], [1, 1]]
UpperCAmelCase_ : Optional[Any] = DummyModel(model=_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_A , _A , signatures={'''serving_default''': dummy_model.serving} )
UpperCAmelCase_ : Union[str, Any] = tf.saved_model.load(_A ).signatures['''serving_default''']
for input_row in range(len(_A ) ):
UpperCAmelCase_ : Optional[int] = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
UpperCAmelCase_ : List[Any] = serving_func(**_A )['''sequences''']
UpperCAmelCase_ : int = test_model.generate(**_A , max_new_tokens=_A )
tf.debugging.assert_equal(_A , _A )
@slow
@require_tensorflow_text
def A ( self : Any ) -> Optional[int]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_A )
class snake_case__ ( tf.keras.layers.Layer):
def __init__( self : Optional[int] ) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_A , '''spiece.model''' ) , '''rb''' ).read() )
UpperCAmelCase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def A ( self : List[Any] , _A : Tuple , *_A : Tuple , **_A : List[Any] ) -> int:
UpperCAmelCase_ : Optional[Any] = self.tokenizer.tokenize(_A )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = text.pad_model_inputs(
_A , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
UpperCAmelCase_ : Optional[int] = self.model.generate(input_ids=_A , attention_mask=_A )
return self.tokenizer.detokenize(_A )
UpperCAmelCase_ : List[Any] = CompleteSentenceTransformer()
UpperCAmelCase_ : List[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
UpperCAmelCase_ : str = complete_model(_A )
UpperCAmelCase_ : Optional[Any] = tf.keras.Model(_A , _A )
keras_model.save(_A )
def A ( self : List[Any] ) -> Dict:
# Has PT equivalent: this test relies on random sampling
UpperCAmelCase_ : Tuple = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
UpperCAmelCase_ : Union[str, Any] = 14
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ : Any = '''Hello, my dog is cute and'''
UpperCAmelCase_ : Tuple = tokenizer(_A , return_tensors='''tf''' )
UpperCAmelCase_ : int = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ : Any = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
UpperCAmelCase_ : Optional[Any] = model.generate(**_A , eos_token_id=_A , **_A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
UpperCAmelCase_ : Any = [6_38, 1_98]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
UpperCAmelCase_ : Dict = model.generate(**_A , eos_token_id=_A , **_A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def A ( self : int ) -> List[str]:
# Has PT equivalent: ample use of framework-specific code
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
UpperCAmelCase_ : Optional[int] = '''Hugging Face is a technology company based in New York and Paris.'''
UpperCAmelCase_ : Any = bart_tokenizer(_A , return_tensors='''tf''' ).input_ids
UpperCAmelCase_ : Dict = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
UpperCAmelCase_ : Any = bart_model.generate(_A ).numpy()
class snake_case__ ( UpperCamelCase):
def A ( self : List[str] , _A : Any , _A : List[Any]=None , **_A : Optional[Any] ) -> str:
return super().call(_A , **_A )
UpperCAmelCase_ : List[Any] = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
UpperCAmelCase_ : int = bart_model.generate(_A , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_A , _A ) )
class snake_case__ ( bart_model.model.encoder.__class__):
def A ( self : Union[str, Any] , _A : List[Any] , **_A : Dict ) -> Tuple:
return super().call(_A , **_A )
UpperCAmelCase_ : List[str] = FakeEncoder(bart_model.config , bart_model.model.shared )
UpperCAmelCase_ : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
UpperCAmelCase_ : Tuple = bart_model.generate(_A ).numpy()
with self.assertRaises(_A ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_A , foo='''bar''' )
| 304
|
'''simple docstring'''
_UpperCamelCase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_UpperCamelCase : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
_UpperCamelCase : Dict = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Optional[int] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
UpperCAmelCase_ : int = do_normalize
UpperCAmelCase_ : List[Any] = image_mean
UpperCAmelCase_ : Dict = image_std
UpperCAmelCase_ : str = class_info_file
UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A )
UpperCAmelCase_ : Tuple = num_text
UpperCAmelCase_ : Union[str, Any] = repo_path
# for the post_process_functions
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : Dict = 10
UpperCAmelCase_ : int = 10
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Union[str, Any] = do_reduce_labels
UpperCAmelCase_ : str = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
if not batched:
UpperCAmelCase_ : Any = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase_ : int = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ : List[Any] = self.size['''shortest_edge''']
UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase_ : Dict = self.size['''shortest_edge''']
UpperCAmelCase_ : str = self.size['''shortest_edge''']
else:
UpperCAmelCase_ : Dict = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ = image_processing_class
def A ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : int = OneFormerImageProcessorTester(self )
@property
def A ( self : Any ) -> int:
return self.image_processing_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''ignore_index''' ) )
self.assertTrue(hasattr(_A , '''class_info_file''' ) )
self.assertTrue(hasattr(_A , '''num_text''' ) )
self.assertTrue(hasattr(_A , '''repo_path''' ) )
self.assertTrue(hasattr(_A , '''metadata''' ) )
self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )
def A ( self : Dict ) -> Dict:
pass
def A ( self : Tuple ) -> Dict:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : int = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Dict ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Optional[int] = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
if with_segmentation_maps:
UpperCAmelCase_ : Any = num_labels
if is_instance_map:
UpperCAmelCase_ : Any = list(range(_A ) ) * 2
UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) )
UpperCAmelCase_ : Dict = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations]
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , )
return inputs
def A ( self : int ) -> str:
pass
def A ( self : Tuple ) -> Union[str, Any]:
def common(_A : Optional[int]=False , _A : str=None ):
UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs(
with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A )
UpperCAmelCase_ : List[Any] = inputs['''mask_labels''']
UpperCAmelCase_ : Optional[Any] = inputs['''class_labels''']
UpperCAmelCase_ : int = inputs['''pixel_values''']
UpperCAmelCase_ : Tuple = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_A , _A , _A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_A ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_A )
common(is_instance_map=_A , segmentation_type='''pil''' )
common(is_instance_map=_A , segmentation_type='''pil''' )
def A ( self : List[Any] ) -> List[Any]:
UpperCAmelCase_ : int = np.zeros((20, 50) )
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : List[Any] = binary_mask_to_rle(_A )
self.assertEqual(len(_A ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A ( self : Any ) -> List[Any]:
UpperCAmelCase_ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(_A )
self.assertEqual(len(_A ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(_A , target_sizes=target_sizes)
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_ : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info''']), list)
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_panoptic_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info''']), list)
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
if padding_side == "right":
if isinstance(A , A ):
UpperCAmelCase_ : Tuple = tensor[:sequence_length]
else:
UpperCAmelCase_ : Dict = tensor[:sequence_length]
else:
if isinstance(A , A ):
UpperCAmelCase_ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase_ : int = tensor[:sequence_length]
return out_tensor.tolist()
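# A de-mangled sketch of the padding helper above (a hypothetical
# reconstruction: the left-hand sides of the slice assignments above were lost
# in extraction, so the exact slicing is assumed, not confirmed). Each sequence
# is truncated to `sequence_length` and written into an array pre-filled with
# the pad value.
def padding_tensor_sketch(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        length = min(len(tensor), sequence_length)
        if padding_side == "right":
            out_tensor[i, :length] = tensor[:length]
        else:
            out_tensor[i, -length:] = tensor[:length]
    return out_tensor.tolist()


# Example: padding_tensor_sketch([[1, 2], [3, 4, 5]], -1, "right", 4)
# -> [[1, 2, -1, -1], [3, 4, 5, -1]]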
def __UpperCAmelCase ( A : List[Any] ) -> str:
    cp = ord(A)
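    # ASCII blocks 33-47, 58-64, 91-96 and 123-126 are treated as punctuation
    # even though characters such as "$", "+", "<" or "=" fall outside the
    # Unicode "P" categories checked below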
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
    cat = unicodedata.category(A)
if cat.startswith('''P''' ):
return True
return False
@dataclass
class snake_case__ ( UpperCamelCase):
a_ = 42
a_ = True
a_ = None
a_ = None
a_ = -100
a_ = "pt"
def A ( self : List[Any] , _A : Dict ) -> Tuple:
import torch
UpperCAmelCase_ : Dict = '''label''' if '''label''' in features[0].keys() else '''labels'''
UpperCAmelCase_ : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase_ : Tuple = self.tokenizer.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase_ : Any = torch.tensor(batch['''entity_ids'''] ).shape[1]
UpperCAmelCase_ : Union[str, Any] = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase_ : Optional[Any] = [
list(_A ) + [self.label_pad_token_id] * (sequence_length - len(_A )) for label in labels
]
else:
UpperCAmelCase_ : Any = [
[self.label_pad_token_id] * (sequence_length - len(_A )) + list(_A ) for label in labels
]
UpperCAmelCase_ : Union[str, Any] = [feature['''ner_tags'''] for feature in features]
UpperCAmelCase_ : Union[str, Any] = padding_tensor(_A , -1 , _A , _A )
UpperCAmelCase_ : List[str] = [feature['''original_entity_spans'''] for feature in features]
UpperCAmelCase_ : int = padding_tensor(_A , (-1, -1) , _A , _A )
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
return batch
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
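# Worked example (the doctest from the original algorithm collection, relying
# on the de-mangled names above):
assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]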
'''simple docstring'''
import functools
def min_distance_up_bottom(worda: str, wordb: str) -> int:
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
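# Worked example: turning "kitten" into "sitting" takes three edits
# (substitute k->s, substitute e->i, insert g), so:
assert min_distance_up_bottom("kitten", "sitting") == 3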
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
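# Worked example of the shift arithmetic in decrypt(): "HELLO" Caesar-shifted
# by key 3 encrypts to "KHOOR", and shifting back by 3 recovers the plaintext.
assert "".join(
    string.ascii_uppercase[(string.ascii_uppercase.find(c) - 3) % 26] for c in "KHOOR"
) == "HELLO"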
'''simple docstring'''
def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
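# Worked check: the expansions run 3/2, 7/5, 17/12, 41/29, 99/70, 239/169,
# 577/408, 1393/985; the eighth is the first whose numerator has more digits
# than its denominator, so:
assert solution(8) == 1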
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
_UpperCamelCase : Union[str, Any] = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
_UpperCamelCase : str = {
'RUCAIBox/mvp': 1_024,
}
class snake_case__ ( UpperCamelCase):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
a_ = MvpTokenizer
def __init__( self : int , _A : Union[str, Any]=None , _A : List[str]=None , _A : Tuple=None , _A : str="replace" , _A : Optional[Any]="<s>" , _A : Optional[int]="</s>" , _A : str="</s>" , _A : Union[str, Any]="<s>" , _A : Optional[int]="<unk>" , _A : List[Any]="<pad>" , _A : int="<mask>" , _A : List[str]=False , _A : Any=True , **_A : Optional[Any] , ) -> Union[str, Any]:
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
UpperCAmelCase_ : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase_ : Optional[Any] = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase_ : Optional[Any] = add_prefix_space
UpperCAmelCase_ : Any = pre_tok_class(**_A )
UpperCAmelCase_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase_ : List[Any] = '''post_processor'''
UpperCAmelCase_ : str = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
UpperCAmelCase_ : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_ : str = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase_ : List[str] = tuple(state['''cls'''] )
UpperCAmelCase_ : List[str] = False
if state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase_ : Optional[int] = add_prefix_space
UpperCAmelCase_ : int = True
if state.get('''trim_offsets''' , _A ) != trim_offsets:
UpperCAmelCase_ : str = trim_offsets
UpperCAmelCase_ : List[str] = True
if changes_to_apply:
UpperCAmelCase_ : Any = getattr(_A , state.pop('''type''' ) )
UpperCAmelCase_ : str = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
def A ( self : Dict ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def A ( self : Union[str, Any] , _A : str ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
UpperCAmelCase_ : str = value
def A ( self : Union[str, Any] , *_A : int , **_A : Optional[int] ) -> BatchEncoding:
UpperCAmelCase_ : Dict = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_A , **_A )
def A ( self : Optional[Any] , *_A : int , **_A : int ) -> BatchEncoding:
UpperCAmelCase_ : Dict = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_A , **_A )
def A ( self : str , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase_ : Dict = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
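    # BART-style special-token layout (see the method below): a single sequence
    # becomes <s> token_ids_a </s>; a pair appends </s> token_ids_b </s> with
    # no token-type distinction.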
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
def A ( self : int , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
UpperCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
def __init__( self : int , _A : List[str] , _A : Dict=7 , _A : List[str]=3 , _A : List[str]=18 , _A : Dict=30 , _A : Union[str, Any]=4_00 , _A : List[str]=True , _A : List[str]=None , _A : int=True , _A : Tuple=None , _A : Union[str, Any]=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Tuple=False , ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Optional[Any] = image_size
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : Optional[Any] = do_center_crop
UpperCAmelCase_ : Optional[int] = crop_size
UpperCAmelCase_ : Tuple = do_normalize
UpperCAmelCase_ : Optional[Any] = image_mean
UpperCAmelCase_ : int = image_std
UpperCAmelCase_ : List[Any] = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    imagea = Image.open(ds[0]["file"])
    imageb = Image.open(ds[1]["file"])
    mapa = Image.open(ds[2]["file"])
    mapb = Image.open(ds[3]["file"])
    return [imagea, imageb], [mapa, mapb]
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = BeitImageProcessor if is_vision_available() else None
def A ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self )
@property
def A ( self : List[Any] ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''center_crop''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
def A ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , _A )
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , _A )
def A ( self : Optional[Any] ) -> Any:
pass
def A ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Union[str, Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Optional[int] ) -> str:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Any ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
UpperCAmelCase_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs()
UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def A ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
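# Sanity check (hedged trace): at side length 7 the spiral diagonals hold
# 8 primes among 13 numbers (about 62%); the ratio first drops below one half
# at side length 11, so:
assert solution(0.5) == 11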
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(UpperCamelCase)
class snake_case__ ( UpperCamelCase):
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : List[str] , *_A : Dict , **_A : int ) -> Optional[int]:
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCAmelCase_ : Dict = None
if self.model.config.prefix is not None:
UpperCAmelCase_ : Tuple = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._sanitize_parameters(prefix=_A , **self._forward_params )
UpperCAmelCase_ : int = {**self._preprocess_params, **preprocess_params}
UpperCAmelCase_ : List[str] = {**self._forward_params, **forward_params}
def A ( self : Union[str, Any] , _A : int=None , _A : str=None , _A : Union[str, Any]=None , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Optional[int]=None , _A : List[Any]=None , **_A : List[Any] , ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = {}
if prefix is not None:
UpperCAmelCase_ : List[Any] = prefix
if prefix:
UpperCAmelCase_ : Tuple = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase_ : List[Any] = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
''' [None, \'hole\']''' )
UpperCAmelCase_ : Union[str, Any] = handle_long_generation
preprocess_params.update(_A )
UpperCAmelCase_ : Optional[int] = generate_kwargs
UpperCAmelCase_ : Tuple = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase_ : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase_ : List[Any] = ReturnType.TENSORS
if return_type is not None:
UpperCAmelCase_ : List[Any] = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase_ : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase_ : Any = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
UpperCAmelCase_ : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def A ( self : Dict , *_A : Optional[Any] , **_A : Any ) -> Any:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[Any] , _A : Union[str, Any] , **_A : List[str] ) -> Dict:
return super().__call__(_A , **_A )
def A ( self : List[Any] , _A : List[Any] , _A : Any="" , _A : Dict=None , **_A : Dict ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase_ : str = prompt_text
if handle_long_generation == "hole":
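            # "hole" keeps only the most recent prompt tokens, so the retained
            # context plus the requested new tokens still fits within the
            # model's maximum input length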
UpperCAmelCase_ : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase_ : Optional[int] = generate_kwargs['''max_new_tokens''']
else:
UpperCAmelCase_ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase_ : Dict = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
UpperCAmelCase_ : List[str] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase_ : Optional[int] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def A ( self : List[str] , _A : Optional[Any] , **_A : str ) -> Optional[int]:
UpperCAmelCase_ : Any = model_inputs['''input_ids''']
UpperCAmelCase_ : Dict = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = 1
else:
UpperCAmelCase_ : Optional[int] = input_ids.shape[0]
UpperCAmelCase_ : Dict = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCAmelCase_ : List[str] = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
UpperCAmelCase_ : str = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCAmelCase_ : Any = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCAmelCase_ : Optional[Any] = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCAmelCase_ : Union[str, Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
UpperCAmelCase_ : Any = generated_sequence.shape[0]
if self.framework == "pt":
UpperCAmelCase_ : List[str] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase_ : int = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def A ( self : int , _A : List[Any] , _A : Dict=ReturnType.FULL_TEXT , _A : Dict=True ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = model_outputs['''generated_sequence'''][0]
UpperCAmelCase_ : int = model_outputs['''input_ids''']
UpperCAmelCase_ : str = model_outputs['''prompt_text''']
UpperCAmelCase_ : Any = generated_sequence.numpy().tolist()
UpperCAmelCase_ : int = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCAmelCase_ : Optional[Any] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCAmelCase_ : Any = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
UpperCAmelCase_ : List[str] = 0
else:
UpperCAmelCase_ : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
UpperCAmelCase_ : Dict = prompt_text + text[prompt_length:]
else:
UpperCAmelCase_ : Dict = text[prompt_length:]
UpperCAmelCase_ : List[str] = {'''generated_text''': all_text}
records.append(_A )
return records
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('T')
_UpperCamelCase : List[str] = Union[List[T], Tuple[T, ...]]
_UpperCamelCase : Dict = Union[T, List[T], Dict[str, T]]
_UpperCamelCase : Union[str, Any] = Union[str, bytes, os.PathLike]
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
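# Worked trace for the scores in main(): the deepest max layer reduces the
# leaf pairs to 90, 33, 65 and 34423; the min layer above yields 33 and 65;
# the root, a max node, therefore returns 65.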
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class snake_case__ ( unittest.TestCase):
def A ( self : Any ) -> int:
UpperCAmelCase_ : Any = '''hf-internal-testing/tiny-random-t5'''
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_A )
        UpperCAmelCase_ : Optional[int] = AutoModelForSeq2SeqLM.from_pretrained(_A )
UpperCAmelCase_ : int = tokenizer('''This is me''' , return_tensors='''pt''' )
UpperCAmelCase_ : Optional[int] = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
UpperCAmelCase_ : int = model.generate(**_A )
UpperCAmelCase_ : Tuple = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
            UpperCAmelCase_ : int = AutoModelForSeq2SeqLM.from_pretrained(_A )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
UpperCAmelCase_ : Tuple = model_reloaded.generate(**_A )
self.assertTrue(torch.allclose(_A , _A ) )
def A ( self : Any ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = '''hf-internal-testing/tiny-random-t5'''
        UpperCAmelCase_ : Optional[int] = AutoModelForSeq2SeqLM.from_pretrained(_A )
UpperCAmelCase_ : str = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_A ):
model.save_pretrained(_A )
UpperCAmelCase_ : Optional[int] = model.reverse_bettertransformer()
model.save_pretrained(_A )
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
_UpperCamelCase : str = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
_UpperCamelCase : List[str] = []
else:
_UpperCamelCase : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
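# Example: the bottom-up passes double the partition size p (2, 4, 8, ...);
# e.g. iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) returns [1, 2, 5, 7, 7, 8, 9].
assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]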
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class snake_case__ ( UpperCamelCase):
a_ = "transfo-xl"
a_ = ["mems"]
a_ = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[Any] , _A : List[str]=26_77_35 , _A : List[str]=[2_00_00, 4_00_00, 20_00_00] , _A : Optional[int]=10_24 , _A : List[str]=10_24 , _A : Tuple=16 , _A : Tuple=64 , _A : Optional[int]=40_96 , _A : List[str]=4 , _A : Optional[Any]=False , _A : Any=18 , _A : int=16_00 , _A : Any=10_00 , _A : List[Any]=True , _A : Dict=True , _A : str=0 , _A : int=-1 , _A : Optional[int]=True , _A : int=0.1 , _A : Dict=0.0 , _A : Tuple=True , _A : Tuple="normal" , _A : Tuple=0.01 , _A : Tuple=0.01 , _A : List[Any]=0.02 , _A : Union[str, Any]=1e-5 , _A : List[str]=0 , **_A : Union[str, Any] , ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Optional[Any] = []
self.cutoffs.extend(_A )
if proj_share_all_but_first:
UpperCAmelCase_ : Tuple = [False] + [True] * len(self.cutoffs )
else:
UpperCAmelCase_ : List[Any] = [False] + [False] * len(self.cutoffs )
UpperCAmelCase_ : Optional[int] = d_model
UpperCAmelCase_ : Tuple = d_embed
UpperCAmelCase_ : List[Any] = d_head
UpperCAmelCase_ : Any = d_inner
UpperCAmelCase_ : Optional[int] = div_val
UpperCAmelCase_ : List[str] = pre_lnorm
UpperCAmelCase_ : Dict = n_layer
UpperCAmelCase_ : str = n_head
UpperCAmelCase_ : str = mem_len
UpperCAmelCase_ : int = same_length
UpperCAmelCase_ : Tuple = attn_type
UpperCAmelCase_ : List[Any] = clamp_len
UpperCAmelCase_ : Any = sample_softmax
UpperCAmelCase_ : Union[str, Any] = adaptive
UpperCAmelCase_ : Dict = dropout
UpperCAmelCase_ : List[str] = dropatt
UpperCAmelCase_ : Any = untie_r
UpperCAmelCase_ : Union[str, Any] = init
UpperCAmelCase_ : Optional[Any] = init_range
UpperCAmelCase_ : List[str] = proj_init_std
UpperCAmelCase_ : str = init_std
UpperCAmelCase_ : int = layer_norm_epsilon
super().__init__(eos_token_id=_A , **_A )
@property
def A ( self : str ) -> Any:
# Message copied from Transformer-XL documentation
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def A ( self : str , _A : Any ) -> str:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
    def get_image_coords(self) -> torch.Tensor:
UpperCAmelCase_ : Dict = torch.arange(self.height * self.width )
UpperCAmelCase_ : int = torch.stack(
[
pixel_indices % self.width,
torch.div(_A , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
UpperCAmelCase_ : Any = self.get_image_coords()
UpperCAmelCase_ : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Union[str, Any] = self.get_camera_rays(_A )
UpperCAmelCase_ : str = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : Dict = coords.view(_A , -1 , 2 )
UpperCAmelCase_ : Union[str, Any] = self.resolution()
UpperCAmelCase_ : int = self.fov()
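        # map pixel indices into [-1, 1] per axis, then scale by tan(fov / 2)
        # so the fractions become ray-direction offsets along the camera's
        # x and y axes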
UpperCAmelCase_ : Dict = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[int] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Any = fracs.view(_A , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(_A , 1 , 3 )
+ self.x.view(_A , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Optional[Any] = directions / directions.norm(dim=-1 , keepdim=_A )
UpperCAmelCase_ : Union[str, Any] = torch.stack(
[
torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_A , *_A , 2 , 3 )
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov)
def __UpperCAmelCase ( A : int ) -> DifferentiableProjectiveCamera:
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : str = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
UpperCAmelCase_ : str = np.array([np.sin(A ), np.cos(A ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : Optional[int] = -z * 4
UpperCAmelCase_ : Optional[int] = np.array([np.cos(A ), -np.sin(A ), 0.0] )
UpperCAmelCase_ : List[Any] = np.cross(A , A )
origins.append(A )
xs.append(A )
ys.append(A )
zs.append(A )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(A , axis=0 ) ).float() , x=torch.from_numpy(np.stack(A , axis=0 ) ).float() , y=torch.from_numpy(np.stack(A , axis=0 ) ).float() , z=torch.from_numpy(np.stack(A , axis=0 ) ).float() , width=A , height=A , x_fov=0.7 , y_fov=0.7 , shape=(1, len(A )) , )
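# Usage sketch (an addition, not part of the original module): create_pan_cameras
# builds a ring of 20 cameras orbiting the origin; the `camera_rays` property packs
# one (origin, direction) pair per pixel for every camera.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    print(cameras.camera_rays.shape)  # torch.Size([1, 81920, 2, 3]) == [1, 20 * 64 * 64, 2, 3]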
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
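# Example invocation (an addition; assuming the script keeps its upstream name
# plot_csv_file.py, with flags matching the PlotArguments fields above):
#   python plot_csv_file.py --csv_file results.csv --is_time --figure_png_file time_plot.png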
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text``, drawing one random key per character."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            cipher.append((i + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover the plaintext from the (cipher, key) pair."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
_UpperCamelCase , _UpperCamelCase : Any = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
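# Minimal usage sketch (an addition, not part of the original file): defaults mirror
# the sijunhe/nezha-cn-base checkpoint configuration.
if __name__ == "__main__":
    config = NezhaConfig()
    print(config.model_type, config.max_relative_position)  # nezha 64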
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def A ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : List[Any] = '''<s>'''
UpperCAmelCase_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_A ) , 10_00 )
def A ( self : Optional[int] ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def A ( self : Optional[Any] ) -> List[Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : Any = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_A )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : List[str] = tokenizer.encode(_A , add_special_tokens=_A )
UpperCAmelCase_ : int = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = tokenizer.encode(_A )
UpperCAmelCase_ : List[str] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def A ( self : Tuple , _A : Dict=15 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(_A , **_A )
# Simple input
UpperCAmelCase_ : Optional[int] = '''This is a simple input'''
UpperCAmelCase_ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : Dict = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
# Pair input
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
def A ( self : Union[str, Any] ) -> int:
pass
def A ( self : int ) -> Any:
UpperCAmelCase_ : Any = ReformerTokenizer(_A , keep_accents=_A )
UpperCAmelCase_ : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [2_85, 46, 10, 1_70, 3_82] , )
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ : List[str] = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def A ( self : List[str] ) -> Optional[int]:
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def A ( self : str ) -> str:
UpperCAmelCase_ : Tuple = '''Hello World!'''
UpperCAmelCase_ : int = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def A ( self : List[Any] ) -> str:
UpperCAmelCase_ : Tuple = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
UpperCAmelCase_ : int = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def A ( self : List[str] ) -> Optional[int]:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
UpperCAmelCase_ : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ : List[Any] = ''' '''.join(_A )
UpperCAmelCase_ : str = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' )
UpperCAmelCase_ : Any = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
UpperCAmelCase_ : List[Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
UpperCAmelCase_ : Any = encoded_sequence['''input_ids'''].shape
UpperCAmelCase_ : Optional[int] = ReformerModel(_A )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def A ( self : int ) -> Optional[Any]:
# fmt: off
UpperCAmelCase_ : int = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
UpperCAmelCase_ : Optional[Any] = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_A , sequences=_A , )
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
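# Quick checks (an addition, not part of the original file):
assert to_pascal_case("hello world") == "HelloWorld"
assert to_camel_case("hello world") == "helloWorld"
assert to_snake_case("Hello World", upper=False) == "hello_world"
assert to_kebab_case("Hello World", upper=True) == "HELLO-WORLD"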
if __name__ == "__main__":
__import__('doctest').testmod()
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
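# Worked example (an addition, not part of the original file): letters map to 1..26,
# so "myname" encodes to [13, 25, 14, 1, 13, 5] and decodes back.
assert encode("myname") == [13, 25, 14, 1, 13, 5]
assert decode([13, 25, 14, 1, 13, 5]) == "myname"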
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class snake_case__ :
a_ = BlenderbotSmallConfig
a_ = {}
a_ = "gelu"
def __init__( self : str , _A : List[str] , _A : List[Any]=13 , _A : Dict=7 , _A : List[Any]=True , _A : Optional[Any]=False , _A : Dict=99 , _A : List[str]=32 , _A : Tuple=2 , _A : str=4 , _A : Any=37 , _A : int=0.1 , _A : Tuple=0.1 , _A : List[str]=20 , _A : List[str]=2 , _A : int=1 , _A : Any=0 , ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : str = use_labels
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = eos_token_id
UpperCAmelCase_ : Any = pad_token_id
UpperCAmelCase_ : List[str] = bos_token_id
def A ( self : int ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase_ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase_ : Optional[Any] = prepare_blenderbot_small_inputs_dict(_A , _A , _A )
return config, inputs_dict
def A ( self : Any , _A : Union[str, Any] , _A : int ) -> Dict:
UpperCAmelCase_ : str = TFBlenderbotSmallModel(config=_A ).get_decoder()
UpperCAmelCase_ : Optional[Any] = inputs_dict['''input_ids''']
UpperCAmelCase_ : List[Any] = input_ids[:1, :]
UpperCAmelCase_ : Union[str, Any] = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase_ : List[Any] = inputs_dict['''head_mask''']
UpperCAmelCase_ : str = 1
# first forward pass
UpperCAmelCase_ : int = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
UpperCAmelCase_ , UpperCAmelCase_ : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase_ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase_ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase_ : Dict = model(_A , attention_mask=_A )[0]
UpperCAmelCase_ : List[Any] = model(_A , attention_mask=_A , past_key_values=_A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_A , _A , rtol=1e-3 )
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
a_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
a_ = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
a_ = True
a_ = False
a_ = False
def A ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase_ : str = TFBlenderbotSmallModelTester(self )
UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=_A )
def A ( self : List[Any] ) -> Dict:
self.config_tester.run_common_tests()
def A ( self : Optional[int] ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_tokenizers
@require_tf
class snake_case__ ( unittest.TestCase):
a_ = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
a_ = "facebook/blenderbot_small-90M"
@cached_property
def A ( self : Any ) -> str:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
@cached_property
def A ( self : Dict ) -> int:
UpperCAmelCase_ : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def A ( self : Any ) -> str:
UpperCAmelCase_ : int = self.tokenizer(self.src_text , return_tensors='''tf''' )
UpperCAmelCase_ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_A , )
UpperCAmelCase_ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_A )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
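# Note (an addition, not part of the original file): replacing this module's entry in
# sys.modules with a _LazyModule defers the heavy torch/tf/flax imports until one of
# the names declared in _import_structure is first accessed.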
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
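# Worked example (an addition, not part of the original file): the 2-bit reflected
# Gray code visits every 2-bit value while changing a single bit per step.
assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
assert gray_code(2) == [0, 1, 3, 2]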
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating BertConfig, with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
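# Note (an addition; based on the movement-pruning example this config comes from, so
# treat the option semantics as an assumption): `pruning_method` selects the masking
# strategy (e.g. "topK" keeps the highest-scoring weights per layer), while
# `mask_init` and `mask_scale` control how the learned mask scores are initialized.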
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = StableDiffusionDiffEditPipeline
a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
a_ = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a_ = frozenset([])
def A ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A ( self : str , _A : List[str] , _A : Any=0 ) -> str:
UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Any = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : str = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : int = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Dict = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[Any] = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any:
UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[int] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : List[str] ) -> Optional[Any]:
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Any = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A )
UpperCAmelCase_ : str = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A )
UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0]
UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max()
self.assertLess(_A , 1e-4 )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : Optional[Any] = '''cpu'''
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A )
UpperCAmelCase_ : int = pipe.generate_mask(**_A )
UpperCAmelCase_ : Tuple = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase_ : List[Any] = np.array([0] * 9 )
UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def A ( self : str ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = '''cpu'''
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : str = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : int = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
def A ( self : Tuple ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : Any = '''cpu'''
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A )
UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A )
UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : List[Any] = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
@require_torch_gpu
@slow
class snake_case__ ( unittest.TestCase):
def A ( self : Optional[Any] ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def A ( cls : Dict ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
UpperCAmelCase_ : Any = raw_image
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : int = torch.manual_seed(0 )
UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Tuple = '''a bowl of pears'''
UpperCAmelCase_ : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[str] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents
UpperCAmelCase_ : Any = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : str = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
def A ( self : Tuple ) -> List[str]:
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Dict = '''a bowl of pears'''
UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[Any] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents
UpperCAmelCase_ : Dict = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : Tuple = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
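# Usage sketch (an addition, not part of the original file): remap a dataset whose
# SQuAD-style columns use different names.
_qa = QuestionAnsweringExtractive(question_column="query", context_column="passage")
assert _qa.column_mapping == {"query": "question", "passage": "context", "answers": "answers"}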
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ ( UpperCamelCase):
def A ( self : List[str] ) -> List[Any]:
UpperCAmelCase_ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_A , '''num_heads''' ) )
class snake_case__ :
def __init__( self : List[Any] , _A : List[str] , _A : Optional[Any]=13 , _A : List[str]=64 , _A : Tuple=3 , _A : int=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Union[str, Any]=[1, 2, 10] , _A : List[Any]=[7, 3, 3] , _A : Optional[Any]=[4, 2, 2] , _A : List[Any]=[2, 1, 1] , _A : Union[str, Any]=[2, 2, 2] , _A : Tuple=[False, False, True] , _A : str=[0.0, 0.0, 0.0] , _A : List[Any]=0.02 , _A : int=1e-12 , _A : Optional[int]=True , _A : List[str]=True , _A : Union[str, Any]=2 , ) -> List[Any]:
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Tuple = patch_sizes
UpperCAmelCase_ : int = patch_stride
UpperCAmelCase_ : Any = patch_padding
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : Tuple = stride_kv
UpperCAmelCase_ : Optional[Any] = depth
UpperCAmelCase_ : Dict = cls_token
UpperCAmelCase_ : Dict = attention_drop_rate
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
def A ( self : int ) -> List[str]:
UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ) -> int:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def A ( self : Dict , _A : List[Any] , _A : Tuple , _A : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : List[Any] = CvtModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Tuple = model(_A )
UpperCAmelCase_ : List[str] = (self.image_size, self.image_size)
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCAmelCase_ : int = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase_ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def A ( self : Any , _A : int , _A : str , _A : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : str = CvtForImageClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : int = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Dict ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = config_and_inputs
UpperCAmelCase_ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
a_ = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def A ( self : int ) -> List[str]:
UpperCAmelCase_ : Optional[int] = CvtModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def A ( self : Any ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : int ) -> List[str]:
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def A ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def A ( self : Any ) -> Optional[Any]:
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def A ( self : List[Any] ) -> Any:
pass
def A ( self : int ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(_A )
UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def A ( self : Dict ) -> List[str]:
def check_hidden_states_output(_A : Dict , _A : str , _A : int ):
UpperCAmelCase_ : str = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase_ : Optional[Any] = outputs.hidden_states
UpperCAmelCase_ : Any = len(self.model_tester.depth )
self.assertEqual(len(_A ) , _A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Dict = True
check_hidden_states_output(_A , _A , _A )
def A ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : List[Any] ) -> Optional[Any]:
pass
@slow
def A ( self : Optional[int] ) -> int:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = CvtModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase):
@cached_property
    def default_image_processor( self : Union[str, Any] ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A ( self : str ) -> str:
UpperCAmelCase_ : str = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_A )
UpperCAmelCase_ : Optional[int] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Any = model(**_A )
# verify the logits
UpperCAmelCase_ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
'''simple docstring'''
def __UpperCAmelCase ( A : int ) -> int:
    if not isinstance(A , int ):
        raise TypeError('''Input value must be an \'int\' type''' )
    if A < 0:
        raise ValueError('''Input value must be a positive integer''' )
    return bin(A ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
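# A minimal sanity check for the popcount helper above; the expected values
# follow from the binary expansions noted in the trailing comments.
if __name__ == "__main__":
    assert __UpperCAmelCase(25 ) == 3  # bin(25) == '0b11001'
    assert __UpperCAmelCase(0 ) == 0  # bin(0) == '0b0'
    assert __UpperCAmelCase(2**10 ) == 1  # a power of two has exactly one set bit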
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase)
class snake_case__ ( UpperCamelCase):
a_ = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True})
a_ = Features({"text": Value("string")})
a_ = Features({})
a_ = "text"
@property
def A ( self : List[str] ) -> Dict[str, str]:
return {self.text_column: "text"}
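# Illustration (hypothetical column name) of the property above: a template
# configured with text_column="content" yields {"content": "text"}, the
# mapping `datasets` uses to rename the input column to the canonical "text"
# feature.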
'''simple docstring'''
_UpperCamelCase : Optional[Any] = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
_UpperCamelCase : int = frozenset(['prompt', 'negative_prompt'])
_UpperCamelCase : List[Any] = frozenset([])
_UpperCamelCase : Dict = frozenset(['image'])
_UpperCamelCase : Union[str, Any] = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
_UpperCamelCase : str = frozenset(['image'])
_UpperCamelCase : str = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
_UpperCamelCase : List[str] = frozenset(['prompt', 'image', 'negative_prompt'])
_UpperCamelCase : Optional[int] = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
_UpperCamelCase : Union[str, Any] = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
_UpperCamelCase : List[Any] = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
_UpperCamelCase : List[Any] = frozenset(['image', 'mask_image'])
_UpperCamelCase : Optional[Any] = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
_UpperCamelCase : Any = frozenset(['example_image', 'image', 'mask_image'])
_UpperCamelCase : Union[str, Any] = frozenset(['class_labels'])
_UpperCamelCase : List[str] = frozenset(['class_labels'])
_UpperCamelCase : Union[str, Any] = frozenset(['batch_size'])
_UpperCamelCase : Any = frozenset([])
_UpperCamelCase : List[str] = frozenset(['batch_size'])
_UpperCamelCase : Tuple = frozenset([])
_UpperCamelCase : str = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
_UpperCamelCase : Optional[int] = frozenset(['prompt', 'negative_prompt'])
_UpperCamelCase : Any = frozenset(['input_tokens'])
_UpperCamelCase : Tuple = frozenset(['input_tokens'])
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata( class_info_file : str , repo_path : str="shi-labs/oneformer_demo" ) -> Dict:
    with open(hf_hub_download(repo_path , class_info_file , repo_type='''dataset''' ) , '''r''' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['''name''']
        class_names.append(info['''name'''] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata['''thing_ids'''] = thing_ids
    metadata['''class_names'''] = class_names
    return metadata
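# Hedged usage sketch (illustrative; fetching the real JSON needs Hub access):
#   metadata = prepare_metadata("ade20k_panoptic.json")
# returns integer-id -> class-name entries plus the aggregate "thing_ids" and
# "class_names" keys assembled in the loop above.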
class OneFormerImageProcessorTester( unittest.TestCase):
    def __init__( self : Any , parent : str , batch_size : Optional[int]=7 , num_channels : Tuple=3 , min_resolution : Tuple=30 , max_resolution : List[Any]=4_00 , size : Tuple=None , do_resize : Optional[Any]=True , do_normalize : Optional[Any]=True , image_mean : Any=[0.5, 0.5, 0.5] , image_std : Any=[0.5, 0.5, 0.5] , num_labels : List[str]=10 , do_reduce_labels : Optional[int]=False , ignore_index : Union[str, Any]=2_55 , repo_path : List[Any]="shi-labs/oneformer_demo" , class_info_file : str="ade20k_panoptic.json" , num_text : List[Any]=10 , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file , repo_path )
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict( self : Dict ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values( self : Any , image_inputs : List[Any] , batched : List[str]=False ) -> Optional[Any]:
if not batched:
UpperCAmelCase_ : Any = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase_ : int = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ : List[Any] = self.size['''shortest_edge''']
UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase_ : Dict = self.size['''shortest_edge''']
UpperCAmelCase_ : str = self.size['''shortest_edge''']
else:
UpperCAmelCase_ : Dict = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            UpperCAmelCase_ : int = max(_A , key=lambda item : item[0] )[0]
            UpperCAmelCase_ : List[str] = max(_A , key=lambda item : item[1] )[1]
return expected_height, expected_width
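        # Worked example of the shortest-edge rule above (illustrative numbers):
        # with size["shortest_edge"] == 32 and an image of height 30 and width 40,
        # the w > h branch applies, giving expected_height == 32 and
        # expected_width == int(32 * 40 / 30) == 42: the shorter side is pinned
        # to the target and the longer side is scaled to keep the aspect ratio.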
    def get_fake_oneformer_outputs( self : Tuple ) -> str:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ = image_processing_class
    def setUp( self : Optional[int] ) -> Any:
        self.image_processing_tester = OneFormerImageProcessorTester(self )
@property
    def image_processor_dict( self : Any ) -> int:
return self.image_processing_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''ignore_index''' ) )
self.assertTrue(hasattr(_A , '''class_info_file''' ) )
self.assertTrue(hasattr(_A , '''num_text''' ) )
self.assertTrue(hasattr(_A , '''repo_path''' ) )
self.assertTrue(hasattr(_A , '''metadata''' ) )
self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )
def A ( self : Dict ) -> Dict:
pass
def A ( self : Tuple ) -> Dict:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : int = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Dict ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Optional[int] = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs( self : int , with_segmentation_maps : Any=False , is_instance_map : List[Any]=False , segmentation_type : Any="np" ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
if with_segmentation_maps:
UpperCAmelCase_ : Any = num_labels
if is_instance_map:
UpperCAmelCase_ : Any = list(range(_A ) ) * 2
UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) )
UpperCAmelCase_ : Dict = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations]
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , )
return inputs
def A ( self : int ) -> str:
pass
def A ( self : Tuple ) -> Union[str, Any]:
        def common(is_instance_map : Optional[int]=False , segmentation_type : str=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
UpperCAmelCase_ : List[Any] = inputs['''mask_labels''']
UpperCAmelCase_ : Optional[Any] = inputs['''class_labels''']
UpperCAmelCase_ : int = inputs['''pixel_values''']
UpperCAmelCase_ : Tuple = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_A , _A , _A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_A ) , self.image_processing_tester.num_text )
common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type='''pil''' )
        common(is_instance_map=True , segmentation_type='''pil''' )
def A ( self : List[Any] ) -> List[Any]:
        fake_binary_mask = np.zeros((20, 50) )
        # Restore the index assignments implied by the RLE assertions below.
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
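        # Why 21 and 45: binary_mask_to_rle flattens the mask row-major and
        # encodes alternating (1-indexed start, run length) values for the runs
        # of ones. Row 0 contributes 20 leading zeros, so the first run starts
        # at pixel 21; it covers the last 30 pixels of row 0 plus the first 15
        # of row 1 (45 pixels), and the row-5 run supplies the final pair,
        # hence len(rle) == 4.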
def A ( self : Any ) -> List[Any]:
UpperCAmelCase_ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCAmelCase_ : Union[str, Any] = image_processor.post_process_semantic_segmentation(_A )
self.assertEqual(len(_A ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase_ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        UpperCAmelCase_ : Any = image_processor.post_process_semantic_segmentation(_A , target_sizes=_A )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_ : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_panoptic_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory( _ : Optional[int] ) -> Optional[int]:
    return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand):
@staticmethod
    def register_subcommand( parser : ArgumentParser ) -> List[Any]:
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )
    def run( self : Any ) -> str:
        hub_version = huggingface_hub.__version__
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = '''not installed'''
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = '''not installed'''
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = '''not installed'''
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            '''`diffusers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''PyTorch version (GPU?)''': F"{pt_version} ({pt_cuda_available})",
            '''Huggingface_hub version''': hub_version,
            '''Transformers version''': transformers_version,
            '''Accelerate version''': accelerate_version,
            '''xFormers version''': xformers_version,
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )
return info
@staticmethod
    def format_dict( d : Dict ) -> str:
return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be checked in an else branch after the two previous regexes.
_re_pt_models = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
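# Quick illustration of the three regexes above: "TFBertModel" matches
# _re_tf_models with group "Bert", "FlaxGPT2Model" matches _re_flax_models
# with group "GPT2", and a plain "BertModel" only matches _re_pt_models,
# yielding "Bert".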
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split( identifier : str ) -> list:
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , identifier )
    return [m.group(0 ) for m in matches]
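# Illustration of the split behavior (follows from the lookarounds above):
#   camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]
# The backend-detection loop below relies on this to strip one trailing
# camel-case word at a time while looking up model prefixes.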
def get_frameworks_table( ) -> pd.DataFrame:
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('''Config''' , '''''' ): model_type for model_type, config in config_mapping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's look through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''''''.join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {'''model_type''': all_models}
    data['''pytorch'''] = [pt_models[t] for t in all_models]
    data['''tensorflow'''] = [tf_models[t] for t in all_models]
    data['''flax'''] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to pick a default preprocessing class for each model type.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = '''AutoProcessor'''
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = '''AutoTokenizer'''
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = '''AutoFeatureExtractor'''
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = '''AutoTokenizer'''
    data['''processor'''] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table( table : Dict ) -> Dict:
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
        auto_classes = [auto_class, F"TF{auto_class}", F"Flax{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata( token : str , commit_sha : str ) -> Tuple:
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            '''model_class''': model_classes,
            '''pipeline_tag''': [table[m][0] for m in model_classes],
            '''auto_class''': [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , '''frameworks.json''' ) )
        tags_dataset.to_json(os.path.join(tmp_dir , '''pipeline_tags.json''' ) )
        if commit_sha is not None:
            commit_message = (
                F"Update with commit {commit_sha}\n\nSee: "
                F"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = '''Update'''
        upload_folder(
            repo_id='''huggingface/transformers-metadata''' , folder_path=tmp_dir , repo_type='''dataset''' , token=token , commit_message=commit_message , )
def check_pipeline_tags( ):
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['''pt''']
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(model )
    if len(missing ) > 0:
        msg = ''', '''.join(missing )
        raise ValueError(
            '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
            F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
_UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_UpperCamelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
_UpperCamelCase : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_UpperCamelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase)} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."})
a_ = field(default=UpperCamelCase , metadata={"help": "Whether ot not to use whole word mask."})
a_ = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
a_ = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a_ = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."})
a_ = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
def get_dataset( args : DataTrainingArguments , tokenizer : PreTrainedTokenizer , evaluate : bool = False , cache_dir : Optional[str] = None , ) -> List[Any]:
    def _dataset(file_path : str , ref_path : str=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def main( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCAmelCase_ : List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
UpperCAmelCase_ : int = AutoModelWithLMHead.from_config(A )
model.resize_token_embeddings(len(A ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            ''' --mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
UpperCAmelCase_ : List[str] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCAmelCase_ : Dict = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCAmelCase_ : str = (
get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCAmelCase_ : Any = (
get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCAmelCase_ : Optional[int] = DataCollatorForPermutationLanguageModeling(
tokenizer=A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCAmelCase_ : Tuple = DataCollatorForWholeWordMask(
tokenizer=A , mlm_probability=data_args.mlm_probability )
else:
UpperCAmelCase_ : List[str] = DataCollatorForLanguageModeling(
tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase_ : Any = Trainer(
model=A , args=A , data_collator=A , train_dataset=A , eval_dataset=A , prediction_loss_only=A , )
# Training
if training_args.do_train:
UpperCAmelCase_ : List[str] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=A )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase_ : Tuple = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase_ : Dict = trainer.evaluate()
UpperCAmelCase_ : Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
UpperCAmelCase_ : Optional[int] = {'''perplexity''': perplexity}
UpperCAmelCase_ : int = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(A , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , A , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(A )
return results
def _mp_fn( index : Tuple ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
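# Hedged example invocation (the script name and flag values are illustrative;
# the flags themselves come from the dataclasses above):
#   python run_language_modeling.py --model_name_or_path bert-base-uncased \
#       --mlm --train_data_file train.txt --do_train --output_dir out/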
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_UpperCamelCase : Optional[int] = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'
def get_user_input( ) -> List[str]:
    compute_environment = _ask_options(
        '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser( subparsers : Optional[int]=None ) -> Optional[int]:
    if subparsers is not None:
        parser = subparsers.add_parser('''config''' , description=_UpperCamelCase )
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''' , description=_UpperCamelCase )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
        parser.set_defaults(func=config_command )
return parser
def config_command( args : Tuple ) -> List[Any]:
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(A )
else:
config.to_yaml_file(A )
print(F"accelerate configuration saved at {config_file}" )
def main( ) -> Dict:
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
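# Usage sketch (grounded in the parser above): `accelerate config` walks the
# prompts from get_user_input() and writes the answers to the default YAML
# location, or to --config_file when given; a ".json" filename switches the
# output to JSON, as handled in the save branch above.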
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class snake_case__ ( unittest.TestCase):
@classmethod
def A ( cls : Optional[int] ) -> Tuple:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(_A )
@classmethod
def A ( cls : int ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        # Treat the models as equal only if every flattened parameter leaf
        # matches to within a small absolute tolerance.
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
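# Usage sketch (hypothetical): a model trivially compares equal to itself,
# since every flattened parameter leaf matches exactly.
#   config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5,
#                       num_attention_heads=4, intermediate_size=37)
#   model = FlaxBertModel(config)
#   assert check_models_equal(model, model)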
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            # Loading from the directory root must fail: the weights only
            # exist inside the subfolder.
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            # A tiny shard size forces the checkpoint to be split across files.
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
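# Example invocation (hypothetical file name; assumes pytest plus a Flax
# install, and staging-Hub credentials for the push-to-hub tests):
#   pytest test_flax_hub_and_subfolders.py -k "subfolder"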